def test_ioclass_file_offset(prepare_and_cleanup):
    """Verify IO classification by file offset.

    Writes landing strictly inside the configured (min, max) byte range must
    be cached; writes below the range must not be.
    """
    cache, core = prepare()

    ioclass_id = 1
    iterations = 100
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 1
    # Byte bounds used by the file_offset rule below (strict inequalities).
    min_cached_offset = 16384
    max_cached_offset = 65536

    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=
        f"file_offset:gt:{min_cached_offset}&file_offset:lt:{max_cached_offset}&done",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestProperties.LOGGER.info(
        f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
    )
    core.create_filesystem(Filesystem.ext3)
    core.mount(mountpoint)

    cache.flush_cache()

    # Since ioclass rule consists of strict inequalities, 'seek' can't be set to first
    # nor last sector
    min_seek = int(
        (min_cached_offset + Unit.Blocks4096.value) / Unit.Blocks4096.value)
    # NOTE(review): max_seek also subtracts min_cached_offset, so only the
    # lower part of the cached range is exercised — presumably intentional
    # safety margin, but confirm against the rule bounds above.
    max_seek = int(
        (max_cached_offset - min_cached_offset - Unit.Blocks4096.value) /
        Unit.Blocks4096.value)
    TestProperties.LOGGER.info(f"Writing to file within cached offset range")
    for i in range(iterations):
        # Random 4 KiB-aligned block inside the cached range.
        file_offset = random.choice(range(min_seek, max_seek))
        dd = (Dd().input("/dev/zero").output(f"{mountpoint}/tmp_file").count(
            dd_count).block_size(dd_size).seek(file_offset))
        dd.run()
        sync()
        stats = cache.get_cache_statistics(io_class_id=ioclass_id)
        # Exactly one dirty 4 KiB block expected after a single cached write.
        assert (stats["dirty"].get_value(
            Unit.Blocks4096) == 1), f"Offset not cached: {file_offset}"
        cache.flush_cache()

    # Offsets below the cached range must stay unclassified (dirty == 0).
    min_seek = 0
    max_seek = int(min_cached_offset / Unit.Blocks4096.value)
    TestProperties.LOGGER.info(
        f"Writing to file outside of cached offset range")
    for i in range(iterations):
        file_offset = random.choice(range(min_seek, max_seek))
        dd = (Dd().input("/dev/zero").output(f"{mountpoint}/tmp_file").count(
            dd_count).block_size(dd_size).seek(file_offset))
        dd.run()
        sync()
        stats = cache.get_cache_statistics(io_class_id=ioclass_id)
        assert (stats["dirty"].get_value(Unit.Blocks4096) == 0
                ), f"Inappropriately cached offset: {file_offset}"
def test_ioclass_lba(prepare_and_cleanup):
    """Write data to random lba and check if it is cached according to range
    defined in ioclass rule"""
    cache, core = prepare()
    ioclass_id = 1
    # Inclusive LBA bounds used by the ge/le rule below.
    min_cached_lba = 56
    max_cached_lba = 200
    iterations = 100
    dd_size = Size(1, Unit.Blocks512)
    dd_count = 1

    # Prepare ioclass config
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=f"lba:ge:{min_cached_lba}&lba:le:{max_cached_lba}&done",
        ioclass_config_path=ioclass_config_path,
    )

    # Prepare cache for test
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    cache.flush_cache()

    # Check if lbas from defined range are cached
    dirty_count = 0
    # '8' step is set to prevent writting cache line more than once
    TestProperties.LOGGER.info(
        f"Writing to one sector in each cache line from range.")
    for lba in range(min_cached_lba, max_cached_lba, 8):
        dd = (Dd().input("/dev/zero").output(f"{core.system_path}").count(
            dd_count).block_size(dd_size).seek(lba))
        dd.run()
        sync()
        dirty_count += 1

        stats = cache.get_cache_statistics(per_io_class=True,
                                           io_class_id=ioclass_id)
        # Every write should dirty exactly one more 4 KiB cache line.
        assert (stats["dirty"].get_value(
            Unit.Blocks4096) == dirty_count), f"LBA {lba} not cached"

    cache.flush_cache()

    # Check if lba outside of defined range are not cached
    TestProperties.LOGGER.info(
        f"Writing to random sectors outside of cached range.")
    for i in range(iterations):
        rand_lba = random.randrange(2000)
        # Skip randomly drawn LBAs that fall inside the cached range.
        if min_cached_lba <= rand_lba <= max_cached_lba:
            continue
        dd = (Dd().input("/dev/zero").output(f"{core.system_path}").count(
            dd_count).block_size(dd_size).seek(rand_lba))
        dd.run()
        sync()

        stats = cache.get_cache_statistics(per_io_class=True,
                                           io_class_id=ioclass_id)
        assert (stats["dirty"].get_value(
            Unit.Blocks4096) == 0), f"Inappropriately cached lba: {rand_lba}"
def test_ioclass_stats_set(prepare_and_cleanup):
    """Try to retrieve stats for all set ioclasses.

    Classes with ids (min_ioclass_id + 10)..(max_ioclass_id + 9) are
    configured below, plus the default class id 0.  Retrieving per-class
    statistics must succeed exactly for those ids and raise for all others.
    """
    prepare()
    min_ioclass_id = 1
    max_ioclass_id = 11

    ioclass_config.create_ioclass_config(
        add_default_rule=True, ioclass_config_path=ioclass_config_path
    )

    TestProperties.LOGGER.info("Preparing ioclass config file")
    for i in range(min_ioclass_id, max_ioclass_id):
        ioclass_config.add_ioclass(
            ioclass_id=(i + 10),
            eviction_priority=22,
            allocation=True,
            rule=f"file_size:le:{4096*i}&done",
            ioclass_config_path=ioclass_config_path,
        )
    casadm.load_io_classes(cache_id, file=ioclass_config_path)

    # Ids actually configured above (shifted by 10 relative to the loop range).
    configured_ids = range(min_ioclass_id + 10, max_ioclass_id + 10)

    TestProperties.LOGGER.info("Checking stats retrieval for ioclass ids")
    for i in range(32):
        # Original condition used 'or' (always true) and the unshifted id
        # range, so it demanded a failure even for configured classes.
        if i != 0 and i not in configured_ids:
            # Unconfigured id: statistics retrieval must fail.
            with pytest.raises(Exception):
                casadm_parser.get_statistics(
                    cache_id=cache_id, io_class_id=i, filter=[StatsFilter.conf]
                )
        else:
            # Default class (id 0) or configured id: must be retrievable.
            casadm_parser.get_statistics(
                cache_id=cache_id, io_class_id=i, filter=[StatsFilter.conf]
            )
def test_ioclass_process_name(prepare_and_cleanup):
    """Verify that writes issued by a process named 'dd' land in the io class."""
    cache, core = prepare()

    io_class = 1
    write_block = Size(4, Unit.KibiByte)
    blocks_per_write = 1
    writes = 100

    # Classify every request issued by a process called "dd".
    ioclass_config.add_ioclass(
        ioclass_id=io_class,
        eviction_priority=1,
        allocation=True,
        rule=f"process_name:dd&done",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    cache.flush_cache()

    Udev.disable()

    TestProperties.LOGGER.info(
        f"Check if all data generated by dd process is cached.")
    for offset in range(writes):
        writer = (
            Dd()
            .input("/dev/zero")
            .output(core.system_path)
            .count(blocks_per_write)
            .block_size(write_block)
            .seek(offset)
        )
        writer.run()
        sync()
        # Give classification / statistics a moment to settle.
        time.sleep(0.1)
        stats = cache.get_cache_statistics(io_class_id=io_class)
        # Each iteration dirties one more block in the io class.
        assert stats["dirty"].get_value(Unit.Blocks4096) == (offset + 1) * blocks_per_write
def test_ioclass_conditions_and(filesystem):
    """
        title: IO class condition 'and'.
        description: |
          Load config with IO class combining 5 conditions contradicting
          at least one other condition.
        pass_criteria:
          - No kernel bug.
          - Every IO fulfilling one of the conditions is not classified.
    """

    file_size = Size(random.randint(25, 50), Unit.MebiByte)
    file_size_bytes = int(file_size.get_value(Unit.Byte))

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        # Five file_size conditions AND-ed together; 'gt' and 'lt' contradict
        # the others, so no IO can ever satisfy the full rule.
        ioclass_config.add_ioclass(
            ioclass_id=1,
            eviction_priority=1,
            allocation=True,
            rule=
            f"file_size:gt:{file_size_bytes}&file_size:lt:{file_size_bytes}&"
            f"file_size:ge:{file_size_bytes}&file_size:le:{file_size_bytes}&"
            f"file_size:eq:{file_size_bytes}",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(filesystem)
    core.mount(mountpoint)
    sync()

    base_occupancy = cache.get_io_class_statistics(
        io_class_id=1).usage_stats.occupancy
    # Perform IO with sizes matching individual sub-conditions (eq, gt, lt) —
    # none may be classified, so occupancy must not move.
    for size in [
            file_size, file_size + Size(1, Unit.MebiByte),
            file_size - Size(1, Unit.MebiByte)
    ]:
        (Fio().create_command().io_engine(
            IoEngine.libaio).size(size).read_write(
                ReadWrite.write).target(f"{mountpoint}/test_file").run())
        sync()
        new_occupancy = cache.get_io_class_statistics(
            io_class_id=1).usage_stats.occupancy

        if new_occupancy != base_occupancy:
            TestRun.fail(
                "Unexpected occupancy increase!\n"
                f"Expected: {base_occupancy}, actual: {new_occupancy}")
def test_ioclass_conditions_or(filesystem):
    """
        title: IO class condition 'or'.
        description: |
          Load config with IO class combining 5 contradicting conditions connected by OR operator.
        pass_criteria:
          - No kernel bug.
          - Every IO fulfilling one condition is classified properly.
    """

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        # directories OR condition — IO touching any of dir1..dir5 matches.
        ioclass_config.add_ioclass(
            ioclass_id=1,
            eviction_priority=1,
            allocation=True,
            rule=
            f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|directory:"
            f"{mountpoint}/dir3|directory:{mountpoint}/dir4|directory:{mountpoint}/dir5",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem "
                      f"and mount {core.system_path} at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        # One directory per OR alternative in the rule above.
        for i in range(1, 6):
            fs_utils.create_directory(f"{mountpoint}/dir{i}")
        sync()

    with TestRun.step(
            "Perform IO fulfilling each condition and check if occupancy raises."
    ):
        for i in range(1, 6):
            file_size = Size(random.randint(25, 50), Unit.MebiByte)
            base_occupancy = cache.get_io_class_statistics(
                io_class_id=1).usage_stats.occupancy
            (Fio().create_command().io_engine(
                IoEngine.libaio).size(file_size).read_write(
                    ReadWrite.write).target(
                        f"{mountpoint}/dir{i}/test_file").run())
            sync()
            new_occupancy = cache.get_io_class_statistics(
                io_class_id=1).usage_stats.occupancy

            # Occupancy must grow by exactly the written file size.
            if new_occupancy != base_occupancy + file_size:
                TestRun.fail(
                    "Occupancy has not increased correctly!\n"
                    f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}"
                )
# Example #7
def test_ioclass_stats_basic(random_cls):
    """
        title: Basic test for retrieving IO class statistics.
        description: |
          Check if statistics are retrieved only for configured IO classes.
        pass_criteria:
          - Statistics are retrieved for configured IO classes.
          - Error is displayed when retrieving statistics for non-configured IO class.
          - Error is displayed when retrieving statistics for out of range IO class id.
    """

    # Half-open id range of the classes configured below: 11..20.
    min_ioclass_id = 11
    max_ioclass_id = 21

    with TestRun.step("Test prepare"):
        prepare(random_cls)

    with TestRun.step("Prepare IO class config file"):
        ioclass_list = []
        for class_id in range(min_ioclass_id, max_ioclass_id):
            ioclass_list.append(IoClass(
                class_id=class_id,
                rule=f"file_size:le:{4096 * class_id}&done",
                priority=22
            ))
        IoClass.save_list_to_config_file(ioclass_list, True)

    with TestRun.step("Load IO class config file"):
        casadm.load_io_classes(cache_id, file=ioclass_config.default_config_file_path)

    with TestRun.step("Try retrieving IO class stats for all allowed id values "
                      "and one out of range id"):
        for class_id in range(ioclass_config.MAX_IO_CLASS_ID + 2):
            out_of_range = " out of range" if class_id > ioclass_config.MAX_IO_CLASS_ID else ""
            with TestRun.group(f"Checking{out_of_range} IO class id {class_id}..."):
                # Stats exist for the default class (id 0) and configured ids.
                expected = class_id == 0 or class_id in range(min_ioclass_id, max_ioclass_id)
                try:
                    casadm.print_statistics(
                        cache_id=cache_id,
                        io_class_id=class_id,
                        per_io_class=True)
                    if not expected:
                        TestRun.LOGGER.error(
                            f"Stats retrieved for not configured IO class {class_id}")
                except CmdException as e:
                    if expected:
                        TestRun.LOGGER.error(f"Stats not retrieved for IO class id: {class_id}")
                    elif class_id <= ioclass_config.MAX_IO_CLASS_ID:
                        # Valid but unconfigured id: expect a specific message.
                        if not check_stderr_msg(e.output, get_stats_ioclass_id_not_configured):
                            TestRun.LOGGER.error(
                                f"Wrong message for unused IO class id: {class_id}")
                    # Out-of-range id: a different error message is expected.
                    elif not check_stderr_msg(e.output, get_stats_ioclass_id_out_of_range):
                        TestRun.LOGGER.error(
                            f"Wrong message for out of range IO class id: {class_id}")
def test_ioclass_pid(prepare_and_cleanup):
    """Check that IO is classified by the pid of the issuing process.

    The target pid is chosen ahead of time by seeding
    /proc/sys/kernel/ns_last_pid, so the pid rule can be loaded before dd runs.
    """
    cache, core = prepare()

    ioclass_id = 1
    iterations = 20
    dd_count = 100
    dd_size = Size(4, Unit.KibiByte)

    Udev.disable()

    # Since 'dd' has to be executed right after writing pid to 'ns_last_pid',
    # 'dd' command is created and is appended to 'echo' command instead of running it
    dd_command = str(Dd().input("/dev/zero").output(
        core.system_path).count(dd_count).block_size(dd_size))

    for i in range(iterations):
        cache.flush_cache()

        output = TestProperties.executor.execute(
            "cat /proc/sys/kernel/ns_last_pid")
        if output.exit_code != 0:
            raise Exception(
                f"Failed to retrieve pid. stdout: {output.stdout} \n stderr :{output.stderr}"
            )

        # Few pids might be used by system during test preparation
        pid = int(output.stdout) + 50

        # Rule matches only IO issued by the process with this exact pid.
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule=f"pid:eq:{pid}&done",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

        TestProperties.LOGGER.info(f"Running dd with pid {pid}")
        # pid saved in 'ns_last_pid' has to be smaller by one than target dd pid
        dd_and_pid_command = (
            f"echo {pid-1} > /proc/sys/kernel/ns_last_pid && {dd_command}")
        output = TestProperties.executor.execute(dd_and_pid_command)
        if output.exit_code != 0:
            raise Exception(
                f"Failed to run dd with target pid. "
                f"stdout: {output.stdout} \n stderr :{output.stderr}")
        sync()
        stats = cache.get_cache_statistics(per_io_class=True,
                                           io_class_id=ioclass_id)
        # All dd writes must have been classified to the pid-based io class.
        assert stats["dirty"].get_value(Unit.Blocks4096) == dd_count

        # Drop the rule so the next iteration can register a fresh pid.
        ioclass_config.remove_ioclass(ioclass_id)
def test_ioclass_file_extension_preexisting_filesystem(prepare_and_cleanup):
    """Create files on filesystem, add device with filesystem as a core,
        write data to files and check if they are cached properly"""
    cache, core = prepare()
    ioclass_id = 1
    extensions = ["tmp", "tm", "out", "txt", "log", "123"]
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 10

    # Detach the core so the filesystem can be populated on the raw device.
    TestProperties.LOGGER.info(f"Preparing files on raw block device")
    casadm.remove_core(cache.cache_id, core_id=core.core_id)
    core.core_device.create_filesystem(Filesystem.ext3)
    core.core_device.mount(mountpoint)

    # Prepare files
    for ext in extensions:
        dd = (Dd().input("/dev/zero").output(f"{mountpoint}/test_file.{ext}").
              count(dd_count).block_size(dd_size))
        dd.run()
    core.core_device.unmount()

    # Prepare ioclass config
    # Single rule OR-ing all tested extensions together.
    rule = "|".join([f"extension:{ext}" for ext in extensions])
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=f"{rule}&done",
        ioclass_config_path=ioclass_config_path,
    )

    # Prepare cache for test
    TestProperties.LOGGER.info(
        f"Adding device with preexisting data as a core")
    core = casadm.add_core(cache, core_dev=core.core_device)
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    core.mount(mountpoint)
    cache.flush_cache()

    # Check if files with proper extensions are cached
    TestProperties.LOGGER.info(f"Writing to file with cached extension.")
    for ext in extensions:
        dd = (Dd().input("/dev/zero").output(f"{mountpoint}/test_file.{ext}").
              count(dd_count).block_size(dd_size))
        dd.run()
        sync()
        stats = cache.get_cache_statistics(per_io_class=True,
                                           io_class_id=ioclass_id)
        # Dirty data grows by one full write per already-written extension.
        assert (stats["dirty"].get_value(
            Unit.Blocks4096) == (extensions.index(ext) + 1) * dd_count)
def test_ioclass_file_extension(prepare_and_cleanup):
    """Verify that only files with the configured name extension are cached."""
    cache, core = prepare()
    repeats = 50
    io_class = 1
    cached_ext = "tmp"
    uncached_exts = ["tm", "tmpx", "txt", "t", "", "123"]
    write_size = Size(4, Unit.KibiByte)
    write_count = 10

    # Only files whose name ends with the tested extension are classified.
    ioclass_config.add_ioclass(
        ioclass_id=io_class,
        eviction_priority=1,
        allocation=True,
        rule=f"extension:{cached_ext}&done",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestProperties.LOGGER.info(
        f"Preparing filesystem and mounting {core.system_path} at {mountpoint}"
    )

    core.create_filesystem(Filesystem.ext3)
    core.mount(mountpoint)

    cache.flush_cache()

    # The same dd command is reused for every pass over the cached file.
    cached_write = (
        Dd()
        .input("/dev/zero")
        .output(f"{mountpoint}/test_file.{cached_ext}")
        .count(write_count)
        .block_size(write_size)
    )
    TestProperties.LOGGER.info(f"Writing to file with cached extension.")
    for pass_no in range(repeats):
        cached_write.run()
        sync()
        stats = cache.get_cache_statistics(per_io_class=True,
                                           io_class_id=io_class)
        # Dirty data must grow by exactly one full write per pass.
        assert stats["dirty"].get_value(Unit.Blocks4096) == (pass_no + 1) * write_count

    cache.flush_cache()

    # Files with any other extension must bypass the io class entirely.
    TestProperties.LOGGER.info(f"Writing to file with no cached extension.")
    for ext in uncached_exts:
        uncached_write = (
            Dd()
            .input("/dev/zero")
            .output(f"{mountpoint}/test_file.{ext}")
            .count(write_count)
            .block_size(write_size)
        )
        uncached_write.run()
        sync()
        stats = cache.get_cache_statistics(per_io_class=True,
                                           io_class_id=io_class)
        assert stats["dirty"].get_value(Unit.Blocks4096) == 0
def test_ioclass_request_size(prepare_and_cleanup):
    """Check classification by single request size.

    Requests of 8 KiB..16 KiB (inclusive) must be cached; smaller and larger
    ones must not be.
    """
    cache, core = prepare()

    ioclass_id = 1
    iterations = 100

    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=f"request_size:ge:8192&request_size:le:16384&done",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    Udev.disable()

    # Check if requests with appropriate size are cached
    TestProperties.LOGGER.info(
        f"Check if requests with size within defined range are cached")
    cached_req_sizes = [Size(2, Unit.Blocks4096), Size(4, Unit.Blocks4096)]
    for i in range(iterations):
        cache.flush_cache()
        req_size = random.choice(cached_req_sizes)
        # 'direct' keeps the whole write as a single request of req_size.
        dd = (Dd().input("/dev/zero").output(
            core.system_path).count(1).block_size(req_size).oflag("direct"))
        dd.run()
        stats = cache.get_cache_statistics(per_io_class=True,
                                           io_class_id=ioclass_id)
        # The entire request (in 4 KiB blocks) must show up as dirty.
        assert (stats["dirty"].get_value(Unit.Blocks4096) == req_size.value /
                Unit.Blocks4096.value)

    cache.flush_cache()

    # Check if requests with inappropriate size are not cached
    TestProperties.LOGGER.info(
        f"Check if requests with size outside defined range are not cached")
    not_cached_req_sizes = [
        Size(1, Unit.Blocks4096),
        Size(8, Unit.Blocks4096),
        Size(16, Unit.Blocks4096),
    ]
    for i in range(iterations):
        req_size = random.choice(not_cached_req_sizes)
        dd = (Dd().input("/dev/zero").output(
            core.system_path).count(1).block_size(req_size).oflag("direct"))
        dd.run()
        stats = cache.get_cache_statistics(per_io_class=True,
                                           io_class_id=ioclass_id)
        assert stats["dirty"].get_value(Unit.Blocks4096) == 0
def load_io_classes_in_permutation_order(rules, permutation, cache):
    """Rebuild the IO class config assigning rules to permuted class ids and load it."""
    ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
    ioclass_config.create_ioclass_config(
        add_default_rule=False, ioclass_config_path=ioclass_config_path
    )
    # To make test more precise all workload except of tested ioclass should be
    # put in pass-through mode
    classes = [IoClass.default(allocation=False)]
    for idx, rule in enumerate(rules):
        classes.append(IoClass(class_id=permutation[idx], rule=rule))
    IoClass.save_list_to_config_file(classes,
                                     add_default_rule=False,
                                     ioclass_config_path=ioclass_config_path)
    casadm.load_io_classes(cache.cache_id, file=ioclass_config_path)
def add_done_to_second_non_exclusive_condition(rules, permutation, cache):
    """Append '&done' to the rule mapped (via permutation) to the second
    non-"direct" class id, reload the config and return that class id.
    """
    non_exclusive_conditions = 0
    second_class_id = 1
    # Walk class ids in ascending order, counting rules other than "direct",
    # until the second such rule is reached.
    # NOTE(review): assumes 'permutation' contains every probed class id and
    # that at least two non-"direct" rules exist; otherwise this loop raises
    # or never hits the break — confirm with callers.
    while True:
        idx = permutation.index(second_class_id)
        if rules[idx] != "direct":
            non_exclusive_conditions += 1
        if non_exclusive_conditions == 2:
            break
        second_class_id += 1
    # Terminate classification at this rule by appending the 'done' keyword
    # directly in the config file.
    fs_utils.replace_first_pattern_occurrence(ioclass_config_path,
                                              rules[idx], f"{rules[idx]}&done")
    sync()
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
    return second_class_id
# Example #14
def load_file_size_io_classes(cache, base_size):
    """Configure and load the fixed set of file-size based IO classes.

    IO class order intentional, do not change.
    """
    base_size_bytes = int(base_size.get_value(Unit.Byte))
    # (ioclass_id, allocation, rule) — registered in this exact order.
    class_specs = [
        (6, "0.00", f"metadata"),
        (1, "1.00", f"file_size:eq:{base_size_bytes}"),
        (2, "1.00", f"file_size:lt:{base_size_bytes}"),
        (3, "1.00", f"file_size:gt:{base_size_bytes}"),
        (4, "1.00", f"file_size:le:{int(base_size_bytes / 2)}"),
        (5, "1.00", f"file_size:ge:{2 * base_size_bytes}"),
    ]
    for class_id, allocation, rule in class_specs:
        ioclass_config.add_ioclass(
            ioclass_id=class_id,
            eviction_priority=1,
            allocation=allocation,
            rule=rule,
            ioclass_config_path=ioclass_config_path,
        )

    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
# Example #15
def test_ioclass_process_name():
    """
        title: Test IO classification by process name.
        description: Check if data generated by process with particular name is cached.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on process generating IO name.
    """
    io_class = 1
    write_block = Size(4, Unit.KibiByte)
    blocks_per_write = 1
    writes = 100

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()

    with TestRun.step("Create and load IO class config file."):
        # Classify every request issued by a process named "dd".
        ioclass_config.add_ioclass(
            ioclass_id=io_class,
            eviction_priority=1,
            allocation="1.00",
            rule=f"process_name:dd&done",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step("Flush cache and disable udev."):
        cache.flush_cache()
        Udev.disable()

    with TestRun.step("Check if all data generated by dd process is cached."):
        for offset in range(writes):
            (Dd().input("/dev/zero")
                 .output(core.path)
                 .count(blocks_per_write)
                 .block_size(write_block)
                 .seek(offset)
                 .run())
            sync()
            # Give classification / statistics a moment to settle.
            time.sleep(0.1)
            dirty = cache.get_io_class_statistics(io_class_id=io_class).usage_stats.dirty
            # Every iteration should add exactly one dirty block to the class.
            if dirty.get_value(Unit.Blocks4096) != (offset + 1) * blocks_per_write:
                TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
def test_ioclass_id_as_condition(filesystem):
    """
        title: IO class as a condition.
        description: |
          Load config in which IO class ids are used as conditions in other IO class definitions.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly as described in IO class config.
    """

    base_dir_path = f"{mountpoint}/base_dir"
    # Exact file size matched by the file_size:eq condition below.
    ioclass_file_size = Size(random.randint(25, 50), Unit.MebiByte)
    ioclass_file_size_bytes = int(ioclass_file_size.get_value(Unit.Byte))

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        # directory condition
        ioclass_config.add_ioclass(
            ioclass_id=1,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{base_dir_path}",
            ioclass_config_path=ioclass_config_path,
        )
        # file size condition
        ioclass_config.add_ioclass(
            ioclass_id=2,
            eviction_priority=1,
            allocation=True,
            rule=f"file_size:eq:{ioclass_file_size_bytes}",
            ioclass_config_path=ioclass_config_path,
        )
        # direct condition
        ioclass_config.add_ioclass(
            ioclass_id=3,
            eviction_priority=1,
            allocation=True,
            rule="direct",
            ioclass_config_path=ioclass_config_path,
        )
        # IO class 1 OR 2 condition
        ioclass_config.add_ioclass(
            ioclass_id=4,
            eviction_priority=1,
            allocation=True,
            rule="io_class:1|io_class:2",
            ioclass_config_path=ioclass_config_path,
        )
        # IO class 4 AND file size condition (same as IO class 2)
        ioclass_config.add_ioclass(
            ioclass_id=5,
            eviction_priority=1,
            allocation=True,
            rule=f"io_class:4&file_size:eq:{ioclass_file_size_bytes}",
            ioclass_config_path=ioclass_config_path,
        )
        # IO class 3 condition
        ioclass_config.add_ioclass(
            ioclass_id=6,
            eviction_priority=1,
            allocation=True,
            rule="io_class:3",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem "
                      f"and mount {core.path} at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        fs_utils.create_directory(base_dir_path)
        sync()

    with TestRun.step("Run IO fulfilling IO class 1 condition (and not IO class 2) and check if "
                      "it is classified properly."):
        # Should be classified as IO class 4
        base_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy
        # Size below 25 MiB so the file_size:eq rule (class 2) cannot match.
        non_ioclass_file_size = Size(random.randrange(1, 25), Unit.MebiByte)
        (Fio().create_command()
         .io_engine(IoEngine.libaio)
         .size(non_ioclass_file_size)
         .read_write(ReadWrite.write)
         .target(f"{base_dir_path}/test_file_1")
         .run())
        sync()
        new_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy

        if new_occupancy != base_occupancy + non_ioclass_file_size:
            TestRun.fail("Writes were not properly cached!\n"
                         f"Expected: {base_occupancy + non_ioclass_file_size}, "
                         f"actual: {new_occupancy}")

    with TestRun.step("Run IO fulfilling IO class 2 condition (and not IO class 1) and check if "
                      "it is classified properly."):
        # Should be classified as IO class 5
        base_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
        # Target outside base_dir so the directory rule (class 1) cannot match.
        (Fio().create_command()
         .io_engine(IoEngine.libaio)
         .size(ioclass_file_size)
         .read_write(ReadWrite.write)
         .target(f"{mountpoint}/test_file_2")
         .run())
        sync()
        new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy

        if new_occupancy != base_occupancy + ioclass_file_size:
            TestRun.fail("Writes were not properly cached!\n"
                         f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")

    with TestRun.step("Run IO fulfilling IO class 1 and 2 conditions and check if "
                      "it is classified properly."):
        # Should be classified as IO class 5
        base_occupancy = new_occupancy
        (Fio().create_command()
         .io_engine(IoEngine.libaio)
         .size(ioclass_file_size)
         .read_write(ReadWrite.write)
         .target(f"{base_dir_path}/test_file_3")
         .run())
        sync()
        new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy

        if new_occupancy != base_occupancy + ioclass_file_size:
            TestRun.fail("Writes were not properly cached!\n"
                         f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")

    with TestRun.step("Run direct IO fulfilling IO class 1 and 2 conditions and check if "
                      "it is classified properly."):
        # Should be classified as IO class 6
        base_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy
        (Fio().create_command()
         .io_engine(IoEngine.libaio)
         .size(ioclass_file_size)
         .read_write(ReadWrite.write)
         .target(f"{base_dir_path}/test_file_3")
         .direct()
         .run())
        sync()
        new_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy

        if new_occupancy != base_occupancy + ioclass_file_size:
            TestRun.fail("Writes were not properly cached!\n"
                         f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
def test_ioclass_metadata(filesystem):
    """
        title: Metadata IO classification.
        description: |
          Determine if every operation on files that cause metadata update results in increased
          writes to cached metadata.
        pass_criteria:
          - No kernel bug.
          - Metadata is classified properly.
    """
    # Exact values may not be tested as each file system has different metadata structure.
    test_dir_path = f"{mountpoint}/test_dir"

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Prepare and load IO class config file."):
        ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
        # metadata IO class
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule="metadata&done",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
                      f"at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Create 20 test files."):
        requests_to_metadata_before = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        files = []
        for i in range(1, 21):
            file_path = f"{mountpoint}/test_file_{i}"
            # oflag=sync forces the writes (and their metadata updates) to hit
            # the device before the statistics are sampled.
            dd = (
                Dd().input("/dev/urandom")
                    .output(file_path)
                    .count(random.randint(5, 50))
                    .block_size(Size(1, Unit.MebiByte))
                    .oflag("sync")
            )
            dd.run()
            files.append(File(file_path))

    with TestRun.step("Check requests to metadata."):
        requests_to_metadata_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        if requests_to_metadata_after == requests_to_metadata_before:
            TestRun.fail("No requests to metadata while creating files!")

    with TestRun.step("Rename all test files."):
        requests_to_metadata_before = requests_to_metadata_after
        for file in files:
            file.move(f"{file.full_path}_renamed")
        sync()

    with TestRun.step("Check requests to metadata."):
        requests_to_metadata_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        if requests_to_metadata_after == requests_to_metadata_before:
            TestRun.fail("No requests to metadata while renaming files!")

    with TestRun.step(f"Create directory {test_dir_path}."):
        requests_to_metadata_before = requests_to_metadata_after
        fs_utils.create_directory(path=test_dir_path)

        TestRun.LOGGER.info(f"Moving test files into {test_dir_path}")
        for file in files:
            file.move(test_dir_path)
        sync()

    with TestRun.step("Check requests to metadata."):
        requests_to_metadata_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        if requests_to_metadata_after == requests_to_metadata_before:
            TestRun.fail("No requests to metadata while moving files!")

    with TestRun.step(f"Remove {test_dir_path}."):
        # Bug fix: re-baseline the counter before the final operation.
        # Previously the stale pre-move value was reused here, so the last
        # check could pass even if directory removal generated no metadata IO.
        requests_to_metadata_before = requests_to_metadata_after
        fs_utils.remove(path=test_dir_path, force=True, recursive=True)

    with TestRun.step("Check requests to metadata."):
        requests_to_metadata_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        if requests_to_metadata_after == requests_to_metadata_before:
            TestRun.fail("No requests to metadata while deleting directory with files!")
def test_ioclass_lba():
    """
        title: Test IO classification by lba.
        description: |
          Write data to random lba and check if it is cached according to range
          defined in ioclass rule
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on lba range defined in config.
    """

    ioclass_id = 1
    min_cached_lba = 56
    max_cached_lba = 200
    iterations = 100
    dd_size = Size(1, Unit.Blocks512)
    dd_count = 1

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()

    with TestRun.step("Prepare and load IO class config."):
        # Cache only IO targeting the [min_cached_lba, max_cached_lba] range.
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule=f"lba:ge:{min_cached_lba}&lba:le:{max_cached_lba}&done",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step("Flush cache."):
        cache.flush_cache()

    with TestRun.step("Run IO and check if lbas from defined range are cached."):
        expected_dirty_blocks = 0
        # '8' step is set to prevent writing cache line more than once
        TestRun.LOGGER.info(f"Writing to one sector in each cache line from range.")
        for lba in range(min_cached_lba, max_cached_lba, 8):
            (Dd().input("/dev/zero")
                 .output(f"{core.path}")
                 .count(dd_count)
                 .block_size(dd_size)
                 .seek(lba)
                 .run())
            sync()
            expected_dirty_blocks += 1

            usage = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats
            if usage.dirty.get_value(Unit.Blocks4096) != expected_dirty_blocks:
                TestRun.LOGGER.error(f"LBA {lba} not cached")

    with TestRun.step("Flush cache."):
        cache.flush_cache()

    with TestRun.step("Run IO and check if lba outside of defined range are not cached."):
        TestRun.LOGGER.info(f"Writing to random sectors outside of cached range.")
        for _ in range(iterations):
            rand_lba = random.randrange(2000)
            # Skip sectors that fall inside the cached range.
            if min_cached_lba <= rand_lba <= max_cached_lba:
                continue
            (Dd().input("/dev/zero")
                 .output(f"{core.path}")
                 .count(dd_count)
                 .block_size(dd_size)
                 .seek(rand_lba)
                 .run())
            sync()

            usage = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats
            if usage.dirty.get_value(Unit.Blocks4096) != 0:
                TestRun.LOGGER.error(f"Inappropriately cached lba: {rand_lba}")
def test_ioclass_direct(filesystem):
    """
        title: Direct IO classification.
        description: Check if direct requests are properly cached.
        pass_criteria:
          - No kernel bug.
          - Data from direct IO should be cached.
          - Data from buffered IO should not be cached and if performed to/from already cached data
            should cause reclassification to unclassified IO class.
    """

    ioclass_id = 1
    # Random size keeps the test from depending on one fixed request pattern.
    io_size = Size(random.randint(1000, 2000), Unit.Blocks4096)

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config."):
        # direct IO class
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule="direct",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step("Prepare fio command."):
        # Offset equals size so the tested region does not start at the
        # beginning of the target.
        fio = Fio().create_command() \
            .io_engine(IoEngine.libaio) \
            .size(io_size).offset(io_size) \
            .read_write(ReadWrite.write) \
            .target(f"{mountpoint}/tmp_file" if filesystem else core.path)

    with TestRun.step("Prepare filesystem."):
        if filesystem:
            TestRun.LOGGER.info(
                f"Preparing {filesystem.name} filesystem and mounting {core.path} at"
                f" {mountpoint}"
            )
            core.create_filesystem(filesystem)
            core.mount(mountpoint)
            sync()
        else:
            TestRun.LOGGER.info("Testing on raw exported object.")

    with TestRun.step(f"Run buffered writes to {'file' if filesystem else 'device'}"):
        base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
        fio.run()
        sync()

    with TestRun.step("Check if buffered writes are not cached."):
        new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
        if new_occupancy != base_occupancy:
            TestRun.fail("Buffered writes were cached!\n"
                         f"Expected: {base_occupancy}, actual: {new_occupancy}")

    with TestRun.step(f"Run direct writes to {'file' if filesystem else 'device'}"):
        fio.direct()
        fio.run()
        sync()

    with TestRun.step("Check if direct writes are cached."):
        new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
        if new_occupancy != base_occupancy + io_size:
            TestRun.fail("Wrong number of direct writes was cached!\n"
                         f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}")

    with TestRun.step(f"Run buffered reads from {'file' if filesystem else 'device'}"):
        fio.remove_param("readwrite").remove_param("direct")
        fio.read_write(ReadWrite.read)
        fio.run()
        sync()

    with TestRun.step("Check if buffered reads caused reclassification."):
        new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
        if new_occupancy != base_occupancy:
            # Bug fix: message previously lacked the trailing newline that all
            # sibling fail messages have, so the two lines ran together.
            TestRun.fail("Buffered reads did not cause reclassification!\n"
                         f"Expected occupancy: {base_occupancy}, actual: {new_occupancy}")

    with TestRun.step(f"Run direct reads from {'file' if filesystem else 'device'}"):
        fio.direct()
        fio.run()
        sync()

    with TestRun.step("Check if direct reads are cached."):
        new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
        if new_occupancy != base_occupancy + io_size:
            TestRun.fail("Wrong number of direct reads was cached!\n"
                         f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}")
def test_ioclass_request_size():
    """
        title: Test IO classification by request size.
        description: Check if requests with size within defined range are cached.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on request size range defined in config.
    """

    ioclass_id = 1
    iterations = 100

    def issue_direct_write(request_size):
        # Issue one direct write of the given size to the raw exported object.
        (Dd().input("/dev/zero")
             .output(core.path)
             .count(1)
             .block_size(request_size)
             .oflag("direct")
             .run())

    def dirty_blocks():
        # Dirty 4KiB blocks currently accounted to the tested IO class.
        return cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.dirty.get_value(Unit.Blocks4096)

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()

    with TestRun.step("Create and load IO class config."):
        # Cache only requests between 8KiB and 16KiB inclusive.
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule="request_size:ge:8192&request_size:le:16384&done",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step("Disable udev."):
        Udev.disable()

    with TestRun.step("Check if requests with size within defined range are cached."):
        cached_req_sizes = [Size(2, Unit.Blocks4096), Size(4, Unit.Blocks4096)]
        for _ in range(iterations):
            cache.flush_cache()
            req_size = random.choice(cached_req_sizes)
            issue_direct_write(req_size)
            if dirty_blocks() != req_size.value / Unit.Blocks4096.value:
                TestRun.fail("Incorrect number of dirty blocks!")

    with TestRun.step("Flush cache."):
        cache.flush_cache()

    with TestRun.step("Check if requests with size outside of defined range are not cached"):
        not_cached_req_sizes = [
            Size(1, Unit.Blocks4096),
            Size(8, Unit.Blocks4096),
            Size(16, Unit.Blocks4096),
        ]
        for _ in range(iterations):
            issue_direct_write(random.choice(not_cached_req_sizes))
            if dirty_blocks() != 0:
                TestRun.fail("Dirty data present!")
# Example #21
def test_ioclass_export_configuration(cache_mode):
    """
    title: Export IO class configuration to a file
    description: |
        Test CAS ability to create a properly formatted file with current IO class configuration
    pass_criteria:
     - CAS default IO class configuration contains unclassified class only
     - CAS properly imports previously exported configuration
    """
    def verify_csv_config(csv_content, expected_list, kind):
        # Compare an IO class list parsed from CSV with the expected one and
        # log a detailed error on mismatch.
        if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv_content),
                                             expected_list):
            TestRun.LOGGER.error(
                f"{kind} configuration does not match expected\n"
                f"Current:\n{csv_content}\n"
                f"Expected:{IoClass.list_to_csv(expected_list)}")

    with TestRun.LOGGER.step("Test prepare"):
        cache, core = prepare(cache_mode)
        saved_config_path = "/tmp/opencas_saved.conf"
        default_list = [IoClass.default()]

    with TestRun.LOGGER.step(
            "Check IO class configuration (should contain only default class)"
    ):
        verify_csv_config(
            casadm.list_io_classes(cache.cache_id, OutputFormat.csv).stdout,
            default_list, "Default")

    with TestRun.LOGGER.step(
            "Create and load configuration file for 33 IO classes "
            "with random names, allocation and priority values"):
        random_list = IoClass.generate_random_ioclass_list(33)
        IoClass.save_list_to_config_file(
            random_list, ioclass_config_path=ioclass_config_path)
        casadm.load_io_classes(cache.cache_id, ioclass_config_path)

    with TestRun.LOGGER.step(
            "Display and export IO class configuration - displayed configuration "
            "should be the same as created"):
        # Redirect the listing to a file so it can be re-imported later.
        TestRun.executor.run(
            f"{casadm.list_io_classes_cmd(str(cache.cache_id), OutputFormat.csv.name)}"
            f" > {saved_config_path}")
        verify_csv_config(fs_utils.read_file(saved_config_path),
                          random_list, "Exported")

    with TestRun.LOGGER.step("Stop Intel CAS"):
        casadm.stop_cache(cache.cache_id)

    with TestRun.LOGGER.step("Start cache and add core"):
        cache = casadm.start_cache(cache.cache_device, force=True)
        casadm.add_core(cache, core.core_device)

    with TestRun.LOGGER.step(
            "Check IO class configuration (should contain only default class)"
    ):
        verify_csv_config(
            casadm.list_io_classes(cache.cache_id, OutputFormat.csv).stdout,
            default_list, "Default")

    with TestRun.LOGGER.step(
            "Load exported configuration file for 33 IO classes"):
        casadm.load_io_classes(cache.cache_id, saved_config_path)

    with TestRun.LOGGER.step(
            "Display IO class configuration - should be the same as created"):
        verify_csv_config(
            casadm.list_io_classes(cache.cache_id, OutputFormat.csv).stdout,
            random_list, "Exported")

    with TestRun.LOGGER.step("Test cleanup"):
        fs_utils.remove(saved_config_path)
# Example #22
def test_ioclass_file_offset():
    """
        title: Test IO classification by file offset.
        description: Test if file offset classification works properly.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on IO class rule with file offset.
    """
    ioclass_id = 1
    iterations = 100
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 1
    min_cached_offset = 16384
    max_cached_offset = 65536

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()

    with TestRun.step("Create and load IO class config file."):
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation="1.00",
            rule=f"file_offset:gt:{min_cached_offset}&file_offset:lt:{max_cached_offset}&done",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step(f"Prepare filesystem and mount {core.path} at {mountpoint}."):
        core.create_filesystem(Filesystem.ext3)
        core.mount(mountpoint)

    with TestRun.step("Flush cache."):
        cache.flush_cache()

    with TestRun.step(
            "Write to file within cached offset range and check if it is cached."
    ):
        # Since ioclass rule consists of strict inequalities, 'seek' can't be set to first
        # nor last sector
        block_size = Unit.Blocks4096.value
        lowest_seek = int((min_cached_offset + block_size) / block_size)
        highest_seek = int(
            (max_cached_offset - min_cached_offset - block_size) / block_size)

        for _ in range(iterations):
            file_offset = random.choice(range(lowest_seek, highest_seek))
            (Dd().input("/dev/zero")
                 .output(f"{mountpoint}/tmp_file")
                 .count(dd_count)
                 .block_size(dd_size)
                 .seek(file_offset)
                 .run())
            sync()
            usage = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats
            if usage.dirty.get_value(Unit.Blocks4096) != 1:
                TestRun.LOGGER.error(f"Offset not cached: {file_offset}")
            # Flush so each iteration starts from a clean dirty counter.
            cache.flush_cache()

    with TestRun.step(
            "Write to file outside of cached offset range and check if it is not cached."
    ):
        lowest_seek = 0
        highest_seek = int(min_cached_offset / block_size)
        TestRun.LOGGER.info(f"Writing to file outside of cached offset range")
        for _ in range(iterations):
            file_offset = random.choice(range(lowest_seek, highest_seek))
            (Dd().input("/dev/zero")
                 .output(f"{mountpoint}/tmp_file")
                 .count(dd_count)
                 .block_size(dd_size)
                 .seek(file_offset)
                 .run())
            sync()
            usage = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats
            if usage.dirty.get_value(Unit.Blocks4096) != 0:
                TestRun.LOGGER.error(
                    f"Inappropriately cached offset: {file_offset}")
# Example #23
def test_trim_eviction(cache_mode, cache_line_size, filesystem, cleaning):
    """
        title: Test verifying if trim requests do not cause eviction on CAS device.
        description: |
          When trim requests enabled and files are being added and removed from CAS device,
          there is no eviction (no reads from cache).
        pass_criteria:
          - Reads from cache device are the same before and after removing test file.
    """
    mount_point = "/mnt"
    test_file_path = os.path.join(mount_point, "test_file")

    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]

        core_disk = TestRun.disks['core']
        core_disk.create_partitions([Size(1, Unit.GibiByte)])
        core_dev = core_disk.partitions[0]

        # Block size of the cache disk; used below to scale iostat's
        # sector-based read counters into a comparable quantity.
        cache_block_size = disk_utils.get_block_size(cache_disk)

    with TestRun.step("Start cache on device supporting trim and add core."):
        cache = casadm.start_cache(cache_dev,
                                   cache_mode,
                                   cache_line_size,
                                   force=True)
        cache.set_cleaning_policy(cleaning)
        # Disable udev so no automatic management interferes with the
        # statistics sampled later in the test.
        Udev.disable()
        core = cache.add_core(core_dev)

    with TestRun.step("Create filesystem on CAS device and mount it."):
        core.create_filesystem(filesystem)
        # 'discard' mount option makes the filesystem send trim requests
        # when files are deleted - the behavior under test.
        core.mount(mount_point, ["discard"])

    with TestRun.step("Create ioclass config."):
        # IO class 1 captures metadata with no allocation, so metadata reads
        # can be accounted separately from data reads (class 0) below.
        ioclass_config.create_ioclass_config()
        ioclass_config.add_ioclass(ioclass_id=1,
                                   eviction_priority=1,
                                   allocation="0.00",
                                   rule=f"metadata")
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config.default_config_file_path)

    with TestRun.step("Create random file using ddrescue."):
        test_file = create_file_with_ddrescue(core_dev, test_file_path)
        os_utils.sync()
        os_utils.drop_caches()
        # Give the classifier time to settle before sampling statistics.
        time.sleep(ioclass_config.MAX_CLASSIFICATION_DELAY.seconds)

    with TestRun.step("Remove file and create a new one."):
        # Snapshot read counters before the remove/recreate cycle:
        # raw device iostat plus per-ioclass CAS block stats.
        cache_iostats_before = cache_dev.get_io_stats()
        data_reads_before = cache.get_io_class_statistics(
            io_class_id=0).block_stats.cache.reads
        metadata_reads_before = cache.get_io_class_statistics(
            io_class_id=1).block_stats.cache.reads
        test_file.remove()
        os_utils.sync()
        os_utils.drop_caches()
        create_file_with_ddrescue(core_dev, test_file_path)
        os_utils.sync()
        os_utils.drop_caches()
        time.sleep(ioclass_config.MAX_CLASSIFICATION_DELAY.seconds)

    with TestRun.step(
            "Check using iostat that reads from cache did not occur."):
        cache_iostats_after = cache_dev.get_io_stats()
        data_reads_after = cache.get_io_class_statistics(
            io_class_id=0).block_stats.cache.reads
        metadata_reads_after = cache.get_io_class_statistics(
            io_class_id=1).block_stats.cache.reads
        reads_before = cache_iostats_before.sectors_read
        reads_after = cache_iostats_after.sectors_read

        metadata_reads_diff = metadata_reads_after - metadata_reads_before
        data_reads_diff = data_reads_after - data_reads_before
        # NOTE(review): sectors_read is scaled by the disk block size here;
        # iostat typically reports 512B sectors - confirm the units agree
        # with the CAS block stats being compared against.
        iostat_diff = (reads_after - reads_before) * cache_block_size

        # Pass condition: device-level reads are explained entirely by
        # metadata reads, and no data reads from cache occurred (no eviction).
        if iostat_diff > int(metadata_reads_diff) or int(data_reads_diff) > 0:
            TestRun.fail(
                f"Number of reads from cache before and after removing test file "
                f"differs. Sectors read before: {reads_before}, sectors read after: {reads_after}."
                f"Data read from cache before {data_reads_before}, after {data_reads_after}."
                f"Metadata read from cache before {metadata_reads_before}, "
                f"after {metadata_reads_after}.")
        else:
            TestRun.LOGGER.info(
                "Number of reads from cache before and after removing test file is the same."
            )
def test_ioclass_occuppancy_load(cache_line_size):
    """
        title: Load cache with occupancy limit specified
        description: |
          Load cache and verify if occupancy limits are loaded correctly and if
          each part has assigned appropriate number of
          dirty blocks.
        pass_criteria:
          - Occupancy thresholds have correct values for each ioclass after load
    """
    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=CacheMode.WB,
                              cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        IoclassConfig = namedtuple("IoclassConfig",
                                   "id eviction_prio max_occupancy dir_path")
        io_classes = [
            IoclassConfig(1, 3, 0.30, f"{mountpoint}/A"),
            IoclassConfig(2, 3, 0.30, f"{mountpoint}/B"),
            IoclassConfig(3, 3, 0.30, f"{mountpoint}/C"),
        ]

        for io_class in io_classes:
            fs_utils.create_directory(io_class.dir_path, parents=True)

    def verify_dirty_limits(checked_cache):
        # Verify each ioclass' dirty data is close (10% tolerance) to its
        # configured occupancy limit; run both before and after cache reload.
        for io_class in io_classes:
            actual_dirty = get_io_class_dirty(checked_cache, io_class.id)

            dirty_limit = ((io_class.max_occupancy * cache_size).align_down(
                Unit.Blocks4096.get_value()).set_unit(Unit.Blocks4096))

            if not isclose(actual_dirty.get_value(),
                           dirty_limit.get_value(),
                           rel_tol=0.1):
                # Fixed message: added newline separator and "actual" typo.
                TestRun.LOGGER.error(
                    f"Dirty for ioclass id: {io_class.id} doesn't match expected.\n"
                    f"Expected: {dirty_limit}, actual: {actual_dirty}")

    with TestRun.step("Remove old ioclass config"):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

    with TestRun.step("Add default ioclasses"):
        # Default (unclassified) ioclass gets no allocation so it does not
        # compete with the tested classes for cache space.
        ioclass_config.add_ioclass(*str(IoClass.default(
            allocation="0.00")).split(","))

    with TestRun.step("Add ioclasses for all dirs"):
        for io_class in io_classes:
            ioclass_config.add_ioclass(
                io_class.id,
                f"directory:{io_class.dir_path}&done",
                io_class.eviction_prio,
                f"{io_class.max_occupancy:0.2f}",
            )

        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Check initial occupancy"):
        for io_class in io_classes:
            occupancy = get_io_class_occupancy(cache, io_class.id)
            if occupancy.get_value() != 0:
                TestRun.LOGGER.error(
                    f"Incorrect initial occupancy for ioclass id: {io_class.id}."
                    f" Expected 0, got: {occupancy}")

    with TestRun.step(f"Perform IO with size equal to cache size"):
        for io_class in io_classes:
            run_io_dir(f"{io_class.dir_path}/tmp_file",
                       int((cache_size) / Unit.Blocks4096))

    with TestRun.step(
            "Check if the ioclass did not exceed specified occupancy"):
        verify_dirty_limits(cache)

    with TestRun.step("Stop cache without flushing the data"):
        # Keep original usage stats and config to compare after the reload.
        original_usage_stats = {}
        for io_class in io_classes:
            original_usage_stats[io_class.id] = get_io_class_usage(
                cache, io_class.id)

        original_ioclass_list = cache.list_io_classes()
        cache_disk_path = cache.cache_device.path
        core.unmount()
        cache.stop(no_data_flush=True)

    with TestRun.step("Load cache"):
        cache = casadm.start_cache(Device(cache_disk_path), load=True)

    with TestRun.step(
            "Check if the ioclass did not exceed specified occupancy"):
        verify_dirty_limits(cache)

    with TestRun.step("Compare ioclass configs"):
        ioclass_list_after_load = cache.list_io_classes()

        if len(ioclass_list_after_load) != len(original_ioclass_list):
            TestRun.LOGGER.error(
                f"Ioclass occupancy limit doesn't match. Original list size: "
                f"{len(original_ioclass_list)}, loaded list size: "
                f"{len(ioclass_list_after_load)}")

        original_sorted = sorted(original_ioclass_list, key=lambda k: k["id"])
        loaded_sorted = sorted(ioclass_list_after_load, key=lambda k: k["id"])

        for original, loaded in zip(original_sorted, loaded_sorted):
            original_allocation = original["allocation"]
            loaded_allocation = loaded["allocation"]
            ioclass_id = original["id"]
            if original_allocation != loaded_allocation:
                TestRun.LOGGER.error(
                    f"Occupancy limit doesn't match for ioclass {ioclass_id}: "
                    f"Original: {original_allocation}, loaded: {loaded_allocation}"
                )

    with TestRun.step("Compare usage stats before and after the load"):
        for io_class in io_classes:
            actual_usage_stats = get_io_class_usage(cache, io_class.id)
            if original_usage_stats[io_class.id] != actual_usage_stats:
                TestRun.LOGGER.error(
                    f"Usage stats doesn't match for ioclass {io_class.id}. "
                    f"Original: {original_usage_stats[io_class.id]}, "
                    f"loaded: {actual_usage_stats}")
# Example #25
def test_ioclass_file_extension_preexisting_filesystem():
    """
        title: Test IO classification by file extension with preexisting filesystem on core device.
        description: |
          Test if file extension classification works properly when there is an existing
          filesystem on core device.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on IO class rule with file extension
            after mounting core device.
    """
    ioclass_id = 1
    extensions = ["tmp", "tm", "out", "txt", "log", "123"]
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 10

    def write_test_file(extension):
        # Write dd_count blocks of dd_size zeros to a file with the given extension.
        (Dd().input("/dev/zero")
             .output(f"{mountpoint}/test_file.{extension}")
             .count(dd_count)
             .block_size(dd_size)
             .run())

    with TestRun.step("Prepare cache and core devices."):
        cache, core = prepare()

    with TestRun.step(f"Prepare files on raw block device."):
        casadm.remove_core(cache.cache_id, core_id=core.core_id)
        core.core_device.create_filesystem(Filesystem.ext3)
        core.core_device.mount(mountpoint)

        for ext in extensions:
            write_test_file(ext)
        core.core_device.unmount()

    with TestRun.step("Create IO class config."):
        rule = "|".join(f"extension:{ext}" for ext in extensions)
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation="1.00",
            rule=f"{rule}&done",
            ioclass_config_path=ioclass_config_path,
        )

    with TestRun.step(f"Add device with preexisting data as a core."):
        core = casadm.add_core(cache, core_dev=core.core_device)

    with TestRun.step("Load IO class config."):
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Mount core and flush cache."):
        core.mount(mountpoint)
        cache.flush_cache()

    with TestRun.step(
            f"Write to file with cached extension and check if they are cached."
    ):
        for written_files, ext in enumerate(extensions, start=1):
            write_test_file(ext)
            sync()
            usage = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats
            if usage.dirty.get_value(Unit.Blocks4096) != written_files * dd_count:
                TestRun.LOGGER.error(f"Wrong amount of dirty data ({usage.dirty}).")
# Example #26
def test_ioclass_file_size(filesystem):
    """
        title: Test IO classification by file size.
        description: Test if file size classification works properly.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on IO class rule with file size.
    """

    # File size IO class rules are configured in a way that each tested file size is unambiguously
    # classified.
    # Firstly write operations are tested (creation of files), secondly read operations.

    # base_size is an even multiple of 4KiB blocks so that base_size / 2 is
    # still block-aligned and every derived size below stays distinct.
    base_size = Size(random.randint(50, 1000) * 2, Unit.Blocks4096)
    size_to_class = {
        base_size: 1,
        base_size - Unit.Blocks4096: 2,
        base_size + Unit.Blocks4096: 3,
        base_size / 2: 4,
        base_size / 2 - Unit.Blocks4096: 4,
        base_size / 2 + Unit.Blocks4096: 2,
        base_size * 2: 5,
        base_size * 2 - Unit.Blocks4096: 3,
        base_size * 2 + Unit.Blocks4096: 5,
    }

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare(default_allocation="1.00")

    with TestRun.step("Prepare and load IO class config."):
        load_file_size_io_classes(cache, base_size)

    with TestRun.step(
            f"Prepare {filesystem.name} filesystem and mount {core.path} "
            f"at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step(
            "Create files belonging to different IO classes (classification by writes)."
    ):
        test_files = []
        for size, ioclass_id in size_to_class.items():
            occupancy_before = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.occupancy
            file_path = f"{mountpoint}/test_file_{size.get_value()}"
            # oflag("sync") forces the write to reach CAS before stats are read
            Dd().input("/dev/zero").output(file_path).oflag("sync").block_size(
                size).count(1).run()
            sync()
            drop_caches(DropCachesMode.ALL)
            occupancy_after = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.occupancy
            if occupancy_after != occupancy_before + size:
                TestRun.fail("File not cached properly!\n"
                             f"Expected {occupancy_before + size}\n"
                             f"Actual {occupancy_after}")
            test_files.append(File(file_path).refresh_item())
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Move all files to 'unclassified' IO class."):
        ioclass_config.remove_ioclass_config(
            ioclass_config_path=ioclass_config_path)
        ioclass_config.create_ioclass_config(
            add_default_rule=False, ioclass_config_path=ioclass_config_path)
        ioclass_config.add_ioclass(
            ioclass_id=0,
            eviction_priority=22,
            allocation="1.00",
            rule="unclassified",
            ioclass_config_path=ioclass_config_path,
        )
        # Route metadata IO to a non-allocating class so it cannot distort
        # the 'unclassified' class occupancy checked below.
        ioclass_config.add_ioclass(
            ioclass_id=6,
            eviction_priority=1,
            allocation="0.00",
            rule="metadata",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)
        occupancy_before = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        for file in test_files:
            Dd().input(file.full_path).output("/dev/null").block_size(
                file.size).run()
            sync()
            drop_caches(DropCachesMode.ALL)
            occupancy_after = cache.get_io_class_statistics(
                io_class_id=0).usage_stats.occupancy
            occupancy_expected = occupancy_before + file.size
            if occupancy_after != occupancy_expected:
                TestRun.fail("File not reclassified properly!\n"
                             f"Expected {occupancy_expected}\n"
                             f"Actual {occupancy_after}")
            occupancy_before = occupancy_after
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Restore IO class configuration."):
        ioclass_config.remove_ioclass_config(
            ioclass_config_path=ioclass_config_path)
        ioclass_config.create_ioclass_config(
            add_default_rule=False, ioclass_config_path=ioclass_config_path)
        ioclass_config.add_ioclass(
            ioclass_id=0,
            eviction_priority=22,
            allocation="1.00",
            rule="unclassified",
            ioclass_config_path=ioclass_config_path,
        )
        load_file_size_io_classes(cache, base_size)

    with TestRun.step(
            "Read files belonging to different IO classes (classification by reads)."
    ):
        # CAS device should be unmounted and mounted because data can be sometimes still cached by
        # OS cache so occupancy statistics will not match
        core.unmount()
        core.mount(mountpoint)
        for file in test_files:
            ioclass_id = size_to_class[file.size]
            occupancy_before = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.occupancy
            Dd().input(file.full_path).output("/dev/null").block_size(
                file.size).run()
            sync()
            drop_caches(DropCachesMode.ALL)
            occupancy_after = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.occupancy
            # NOTE(review): unlike the write path, this comparison is done in
            # 4KiB block counts rather than on Size objects directly.
            actual_blocks = occupancy_after.get_value(Unit.Blocks4096)
            expected_blocks = (occupancy_before + file.size).get_value(
                Unit.Blocks4096)
            if actual_blocks != expected_blocks:
                TestRun.fail("File not reclassified properly!\n"
                             f"Expected {occupancy_before + file.size}\n"
                             f"Actual {occupancy_after}")
        sync()
        drop_caches(DropCachesMode.ALL)
def test_ioclass_resize(cache_line_size, new_occupancy):
    """
        title: Resize ioclass
        description: |
          Add ioclass, fill it with data, change it's size and check if new
          limit is respected
        pass_criteria:
          - Occupancy threshold is respected
    """

    def load_ioclass_config():
        """Recreate and load the IO class config from scratch: metadata class
        (id 1, always allocating), pass-through default class and the tested
        directory-based class with its current max_occupancy."""
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)
        ioclass_config.add_ioclass(ioclass_id=1,
                                   rule="metadata&done",
                                   eviction_priority=1,
                                   allocation="1.00",
                                   ioclass_config_path=ioclass_config_path)
        ioclass_config.add_ioclass(*str(IoClass.default(
            allocation="0.00")).split(","))
        ioclass_config.add_ioclass(
            io_class.id,
            f"directory:{io_class.dir_path}&done",
            io_class.eviction_prio,
            f"{io_class.max_occupancy:0.2f}",
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    def check_occupancy_limit():
        """Verify the directory ioclass occupancy stays within its limit."""
        actual_occupancy = get_io_class_occupancy(cache, io_class.id)
        occupancy_limit = ((io_class.max_occupancy * cache_size).align_up(
            Unit.Blocks4096.get_value()).set_unit(Unit.Blocks4096))
        # Divergence may be caused by rounding of max occupancy, hence the
        # 100-block tolerance.
        if actual_occupancy > occupancy_limit + Size(100, Unit.Blocks4096):
            TestRun.LOGGER.error(
                f"Occupancy for ioclass id exceeded: {io_class.id}. "
                f"Limit: {occupancy_limit}, actual: {actual_occupancy}")

    with TestRun.step("Prepare CAS device"):
        cache, core = prepare(cache_mode=CacheMode.WT,
                              cache_line_size=cache_line_size)
        cache_size = cache.get_statistics().config_stats.cache_size

    with TestRun.step("Disable udev"):
        Udev.disable()

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}"):
        filesystem = Filesystem.xfs
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Prepare test dirs"):
        # Mutable record: max_occupancy is updated in place on resize below.
        IoclassConfig = recordclass("IoclassConfig",
                                    "id eviction_prio max_occupancy dir_path")
        io_class = IoclassConfig(2, 3, 0.10, f"{mountpoint}/A")

        fs_utils.create_directory(io_class.dir_path, parents=True)

    with TestRun.step("Add ioclass config with directory ioclass"):
        load_ioclass_config()

    with TestRun.step("Reset cache stats"):
        cache.purge_cache()
        cache.reset_counters()

    with TestRun.step("Check initial occupancy"):
        occupancy = get_io_class_occupancy(cache, io_class.id)
        if occupancy.get_value() != 0:
            TestRun.LOGGER.error(
                f"Incorrect initial occupancy for ioclass id: {io_class.id}."
                f" Expected 0, got: {occupancy}")

    with TestRun.step("Perform IO with size equal to cache size"):
        run_io_dir(f"{io_class.dir_path}/tmp_file",
                   int((cache_size) / Unit.Blocks4096))

    with TestRun.step(
            "Check if the ioclass did not exceed specified occupancy"):
        check_occupancy_limit()

    with TestRun.step(
            f"Resize ioclass from {io_class.max_occupancy*100}% to {new_occupancy}%"
            " cache occupancy"):
        io_class.max_occupancy = new_occupancy / 100
        load_ioclass_config()

    with TestRun.step("Perform IO with size equal to cache size"):
        run_io_dir(f"{io_class.dir_path}/tmp_file",
                   int((cache_size) / Unit.Blocks4096))

    with TestRun.step(
            "Check if the ioclass did not exceed specified occupancy"):
        check_occupancy_limit()
# Example #28
def test_ioclass_file_extension():
    """
        title: Test IO classification by file extension.
        description: Test if file extension classification works properly.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on IO class rule with file extension.
    """
    iterations = 50
    ioclass_id = 1
    tested_extension = "tmp"
    wrong_extensions = ["tm", "tmpx", "txt", "t", "", "123", "tmp.xx"]
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 10
    # dd writing to a file with the cached extension; reused on every iteration
    dd = (Dd()
          .input("/dev/zero")
          .output(f"{mountpoint}/test_file.{tested_extension}")
          .count(dd_count)
          .block_size(dd_size))

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()

    with TestRun.step("Create and load IO class config."):
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation="1.00",
            rule=f"extension:{tested_extension}&done",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}."):
        core.create_filesystem(Filesystem.ext3)
        core.mount(mountpoint)

    with TestRun.step("Flush cache."):
        cache.flush_cache()

    with TestRun.step(
            f"Write to file with cached extension and check if it is properly cached."
    ):
        # Dirty data should accumulate by dd_count blocks per iteration.
        for iteration in range(1, iterations + 1):
            dd.run()
            sync()
            dirty = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.dirty
            expected_blocks = iteration * dd_count
            if dirty.get_value(Unit.Blocks4096) != expected_blocks:
                TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")

    with TestRun.step("Flush cache."):
        cache.flush_cache()

    with TestRun.step(
            f"Write to file with not cached extension and check if it is not cached."
    ):
        # None of these writes may produce dirty data in the tested class.
        for wrong_ext in wrong_extensions:
            wrong_ext_dd = (Dd()
                            .input("/dev/zero")
                            .output(f"{mountpoint}/test_file.{wrong_ext}")
                            .count(dd_count)
                            .block_size(dd_size))
            wrong_ext_dd.run()
            sync()
            dirty = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.dirty
            if dirty.get_value(Unit.Blocks4096) != 0:
                TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
# Example #29
def test_ioclass_file_name_prefix():
    """
        title: Test IO classification by file name prefix.
        description: Test if file name prefix classification works properly.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on IO class rule with file name prefix.
    """

    ioclass_id = 1
    cached_files = ["test", "test.txt", "test1", "test1.txt"]
    not_cached_files = ["file1", "file2", "file4", "file5", "tes"]
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 10

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()

    with TestRun.step("Create and load IO class config."):
        ioclass_config.remove_ioclass_config()
        ioclass_config.create_ioclass_config(False)

        # Avoid caching anything else than files with specified prefix
        ioclass_config.add_ioclass(
            ioclass_id=0,
            eviction_priority=255,
            allocation="0.00",
            rule="unclassified",
            ioclass_config_path=ioclass_config_path,
        )
        # Enables file with specified prefix to be cached
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation="1.00",
            rule="file_name_prefix:test&done",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(
            f"Prepare filesystem and mount {core.path} at {mountpoint}"):
        previous_occupancy = cache.get_occupancy()

        core.create_filesystem(Filesystem.ext3)
        core.mount(mountpoint)

        # Filesystem creation caused metadata IO which is not supposed
        # to be cached, so occupancy may only stay equal or grow.
        current_occupancy = cache.get_occupancy()
        if previous_occupancy.get_value() > current_occupancy.get_value():
            TestRun.fail(
                f"Current occupancy ({str(current_occupancy)}) is lower "
                f"than before ({str(previous_occupancy)}).")

    # Check if files with proper prefix are cached
    with TestRun.step(f"Write files which are supposed to be cached and check "
                      f"if they are cached."):
        for file_name in cached_files:
            dd = (Dd().input("/dev/zero").output(
                f"{mountpoint}/{file_name}").count(dd_count).block_size(
                    dd_size))
            dd.run()
            sync()
            current_occupancy = cache.get_occupancy()
            expected_occupancy = previous_occupancy + (dd_size * dd_count)
            if current_occupancy != expected_occupancy:
                TestRun.fail(f"Current occupancy value is not valid. "
                             f"(Expected: {str(expected_occupancy)}, "
                             f"actual: {str(current_occupancy)})")
            previous_occupancy = current_occupancy

    with TestRun.step("Flush cache."):
        cache.flush_cache()

    # Check if file with improper extension is not cached
    with TestRun.step(
            f"Write files which are not supposed to be cached and check if "
            f"they are not cached."):
        for file_name in not_cached_files:
            dd = (Dd().input("/dev/zero").output(
                f"{mountpoint}/{file_name}").count(dd_count).block_size(
                    dd_size))
            dd.run()
            sync()
            current_occupancy = cache.get_occupancy()
            if current_occupancy != previous_occupancy:
                TestRun.fail(f"Current occupancy value is not valid. "
                             f"(Expected: {str(previous_occupancy)}, "
                             f"actual: {str(current_occupancy)})")
# Example #30
def test_ioclass_pid():
    """
        title: Test IO classification by process id.
        description: Check if data generated by process with particular id is cached.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on process generating IO id.
    """
    ioclass_id = 1
    iterations = 20
    dd_count = 100
    dd_size = Size(4, Unit.KibiByte)

    with TestRun.step("Prepare cache, core and disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Prepare dd command."):
        # Since 'dd' has to be executed right after writing pid to 'ns_last_pid',
        # 'dd' command is created and is appended to 'echo' command instead of running it
        dd_command = str(Dd().input("/dev/zero").output(
            core.path).count(dd_count).block_size(dd_size))

    for _ in TestRun.iteration(range(iterations)):
        with TestRun.step("Flush cache."):
            cache.flush_cache()

        with TestRun.step("Prepare and load IO class config."):
            # 'ns_last_pid' holds the last pid the kernel assigned in this
            # namespace; it is read here to predict the pid dd will get.
            output = TestRun.executor.run("cat /proc/sys/kernel/ns_last_pid")
            if output.exit_code != 0:
                raise Exception(
                    f"Failed to retrieve pid. stdout: {output.stdout} \n stderr :{output.stderr}"
                )

            # Few pids might be used by system during test preparation
            pid = int(output.stdout) + 50

            ioclass_config.add_ioclass(
                ioclass_id=ioclass_id,
                eviction_priority=1,
                allocation="1.00",
                rule=f"pid:eq:{pid}&done",
                ioclass_config_path=ioclass_config_path,
            )
            casadm.load_io_classes(cache_id=cache.cache_id,
                                   file=ioclass_config_path)

        with TestRun.step(f"Run dd with pid {pid}."):
            # pid saved in 'ns_last_pid' has to be smaller by one than target dd pid
            dd_and_pid_command = (
                f"echo {pid-1} > /proc/sys/kernel/ns_last_pid && {dd_command}")
            output = TestRun.executor.run(dd_and_pid_command)
            if output.exit_code != 0:
                raise Exception(
                    f"Failed to run dd with target pid. "
                    f"stdout: {output.stdout} \n stderr :{output.stderr}")
            sync()
        with TestRun.step("Check if data was cached properly."):
            # All blocks written by dd should land as dirty data in the
            # pid-based IO class.
            dirty = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.dirty
            if dirty.get_value(Unit.Blocks4096) != dd_count:
                TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
            # Drop this iteration's pid rule so the next iteration can add
            # a fresh one for a new pid.
            ioclass_config.remove_ioclass(ioclass_id)