Example #1
def test_data_integrity_5d():
    """
        title: |
          Data integrity test on four CAS instances with different
          cache modes with duration time equal to 5 days
        description: |
          Create 4 cache instances with different cache modes on caches equal to 50GB
          and cores equal to 150GB, and run workload with data verification.
        pass_criteria:
            - System does not crash.
            - All operations complete successfully.
            - Data consistency is preserved.
    """
    with TestRun.step("Prepare cache and core devices"):
        cache_devices, core_devices = prepare_devices()

    with TestRun.step(
            "Run 4 cache instances in different cache modes, add single core to each"
    ):
        cache_modes = [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WO]
        caches = []
        cores = []
        for i in range(4):
            cache, core = start_instance(cache_devices[i], core_devices[i],
                                         cache_modes[i])
            caches.append(cache)
            cores.append(core)

    with TestRun.step("Run test workloads with verification"):
        fio_run = Fio().create_command()
        fio_run.io_engine(IoEngine.libaio)
        fio_run.direct()
        fio_run.time_based()
        fio_run.do_verify()
        fio_run.verify(VerifyMethod.md5)
        fio_run.verify_dump()
        fio_run.run_time(runtime)
        fio_run.read_write(ReadWrite.randrw)
        fio_run.io_depth(128)
        fio_run.blocksize_range([(start_size, stop_size)])
        for core in cores:
            fio_job = fio_run.add_job()
            fio_job.target(core)
        fio_run.run()

    with TestRun.step("Calculate md5 for each core"):
        core_md5s = [File(core.full_path).md5sum() for core in cores]

    with TestRun.step("Stop caches"):
        for cache in caches:
            cache.stop()

    with TestRun.step("Calculate md5 for each core"):
        dev_md5s = [File(dev.full_path).md5sum() for dev in core_devices]

    with TestRun.step("Compare md5 sums for cores and core devices"):
        for core_md5, dev_md5, mode in zip(core_md5s, dev_md5s, cache_modes):
            if core_md5 != dev_md5:
                TestRun.fail(f"MD5 sums of core and core device do not match! "
                             f"Cache mode: {mode}")
Example #2
def download_file(url, destination_dir="/tmp"):
    # TODO use wget module instead
    command = ("wget --tries=3 --timeout=5 --continue --quiet "
               f"--directory-prefix={destination_dir} {url}")
    TestRun.executor.run_expect_success(command)
    path = f"{destination_dir.rstrip('/')}/{File.get_name(url)}"
    return File(path)
def prepare_md_dump(cache_device, core_device, cls, cache_id):
    with TestRun.step("Setup WB cache instance with one core"):
        cache = casadm.start_cache(
            cache_dev=cache_device,
            cache_line_size=cls,
            cache_mode=CacheMode.WB,
            cache_id=cache_id,
            force=True,
        )
        cache.add_core(core_device)

    with TestRun.step("Get metadata size"):
        dmesg_out = TestRun.executor.run_expect_success("dmesg").stdout
        md_size = dmesg.get_metadata_size(dmesg_out)

    with TestRun.step("Dump the metadata of the cache"):
        dump_file_path = "/tmp/test_activate_corrupted.dump"
        md_dump = File(dump_file_path)
        md_dump.remove(force=True, ignore_errors=True)

        dd_count = int(md_size / Size(1, Unit.MebiByte)) + 1
        (Dd().input(cache_device.path).output(md_dump.full_path).block_size(
            Size(1, Unit.MebiByte)).count(dd_count).run())
        md_dump.refresh_item()

    with TestRun.step("Stop cache device"):
        cache.stop()

    return md_dump
Example #4
def download_file(url, destination_dir="/tmp"):
    command = ("wget --tries=3 --timeout=5 --continue --quiet "
               f"--directory-prefix={destination_dir} {url}")
    output = TestRun.executor.run(command)
    if output.exit_code != 0:
        raise Exception(
            f"Download failed. stdout: {output.stdout} \n stderr :{output.stderr}")
    path = f"{destination_dir.rstrip('/')}/{File.get_name(url)}"
    return File(path)
def create_file_with_ddrescue(core_dev, test_file_path):
    ddrescue = Ddrescue() \
        .block_size(Size(1, Unit.Blocks4096)) \
        .size(core_dev.size * 0.9) \
        .synchronous() \
        .source("/dev/urandom") \
        .destination(test_file_path)
    ddrescue.run()

    return File(test_file_path)
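# Hedged usage sketch: in recovery tests this helper typically fills the exported object
# with random data so its checksum can be compared after the scenario under test.
# The mount point path below is a placeholder, not part of the original framework.
def _fill_core_and_checksum(core_dev):
    test_file = create_file_with_ddrescue(core_dev, "/mnt/cas1-1/test_file")
    return test_file.md5sum()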
Example #6
def uncompress_archive(file, destination=None):
    from test_utils.filesystem.file import File

    if not isinstance(file, File):
        file = File(file)
    if not destination:
        destination = file.parent_dir
    command = (f"unzip -u {file.full_path} -d {destination}"
               if str(file).endswith(".zip")
               else f"tar --extract --file={file.full_path} --directory={destination}")
    TestRun.executor.run_expect_success(command)
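# Hedged sketch combining this helper with download_file from the earlier examples,
# assuming both live in the same module; the URL is supplied by the caller.
# uncompress_archive picks unzip for .zip files and tar for everything else,
# extracting next to the archive unless a destination is given.
def _download_and_unpack(url, destination_dir="/tmp"):
    archive = download_file(url, destination_dir)
    uncompress_archive(archive, destination=destination_dir)
    return archive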
Example #7
def parse_ls_output(ls_output, dir_path=''):
    split_output = ls_output.split('\n')
    fs_items = []
    for line in split_output:
        if not line.strip():
            continue
        line_fields = line.split()
        if len(line_fields) < 8:
            continue
        file_type = line[0]
        if file_type not in ['-', 'd', 'l', 'b', 'c', 'p', 's']:
            continue
        permissions = line_fields[0][1:].replace('.', '')
        owner = line_fields[2]
        group = line_fields[3]
        size = Size(float(line_fields[4]), Unit.Byte)
        split_date = line_fields[5].split('-')
        split_time = line_fields[6].split(':')
        modification_time = datetime(int(split_date[0]), int(split_date[1]),
                                     int(split_date[2]), int(split_time[0]),
                                     int(split_time[1]), int(split_time[2]))
        if dir_path and file_type != 'l':
            full_path = '/'.join([dir_path, line_fields[7]])
        else:
            full_path = line_fields[7]

        from test_utils.filesystem.file import File, FsItem
        from test_utils.filesystem.directory import Directory
        from test_utils.filesystem.symlink import Symlink

        if file_type == '-':
            fs_item = File(full_path)
        elif file_type == 'd':
            fs_item = Directory(full_path)
        elif file_type == 'l':
            target_path = TestProperties.executor.execute(
                f"readlink -f {full_path}").stdout
            fs_item = Symlink(full_path, target_path)
        else:
            fs_item = FsItem(full_path)

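        # Translate each rwx triplet into Permissions flags, e.g. 'rw-' -> Permissions['r|w'],
        # with '---' mapping to Permissions(0)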
        fs_item.permissions.user = Permissions['|'.join(list(permissions[:3].replace('-', '')))]\
            if permissions[:3] != '---' else Permissions(0)
        fs_item.permissions.group = Permissions['|'.join(list(permissions[3:6].replace('-', '')))]\
            if permissions[3:6] != '---' else Permissions(0)
        fs_item.permissions.other = Permissions['|'.join(list(permissions[6:].replace('-', '')))]\
            if permissions[6:] != '---' else Permissions(0)

        fs_item.owner = owner
        fs_item.group = group
        fs_item.size = size
        fs_item.modification_time = modification_time
        fs_items.append(fs_item)
    return fs_items
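# Hedged sketch of the `ls` output format the parser above assumes: a long listing whose
# time style expands to "%Y-%m-%d %H:%M:%S", so fields 5 and 6 split cleanly into date
# and time. The sample line and directory are illustrative only.
def _parse_ls_output_example():
    sample = "-rw-r--r--. 1 root root 4096 2023-01-15 10:30:00 test_file"
    items = parse_ls_output(sample, dir_path="/tmp")
    # items[0] is a File("/tmp/test_file") owned by root, 4096 B in size,
    # with modification_time 2023-01-15 10:30:00
    return items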
def prepare_corrupted_md(md_dump, offset_to_corrupt, bs):
    invalid_dump_path = "/tmp/test_activate_corrupted.invalid_dump"
    dd_count = offset_to_corrupt + 1

    md_dump.copy(destination=invalid_dump_path, force=True)
    corrupted_md = File(invalid_dump_path)
    (Dd().input("/dev/urandom").output(corrupted_md.full_path).block_size(
        bs).count(dd_count).seek(offset_to_corrupt).conv("notrunc").run())
    corrupted_md.refresh_item()

    return corrupted_md
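# Hedged sketch of how prepare_md_dump and prepare_corrupted_md might be chained in a
# corrupted-metadata activation scenario; the offset and block size below are
# placeholders, not values taken from the original tests.
def _prepare_corrupted_metadata(cache_device, core_device, cls, cache_id):
    md_dump = prepare_md_dump(cache_device, core_device, cls, cache_id)
    return prepare_corrupted_md(md_dump, offset_to_corrupt=100,
                                bs=Size(1, Unit.MebiByte))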
Example #9
def uncompress_archive(file, destination=None):
    from test_utils.filesystem.file import File

    if not isinstance(file, File):
        file = File(file)
    if not destination:
        destination = file.parent_dir
    command = (
        f"unzip -u {file.full_path} -d {destination}"
        if str(file).endswith(".zip") else
        f"tar --extract --file={file.full_path} --directory={destination}")
    TestProperties.execute_command_and_check_if_passed(command)
Example #10
def test_load_after_clean_shutdown(reboot_type, cache_mode, filesystem):
    """
        title: Planned system shutdown test.
        description: Test for data consistency after clean system shutdown.
        pass_criteria:
          - DUT should reboot successfully.
          - Checksum of file on core device should be the same before and after reboot.
    """
    with TestRun.step("Prepare CAS device."):
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]
        core_dev = TestRun.disks['core']
        cache = casadm.start_cache(cache_dev, cache_mode, force=True)
        core = cache.add_core(core_dev)
        core.create_filesystem(filesystem,
                               blocksize=int(Size(1, Unit.Blocks4096)))
        core.mount(mount_point)

    with TestRun.step("Create file on cache and count its checksum."):
        test_file = File(os.path.join(mount_point, "test_file"))
        Dd()\
            .input("/dev/zero")\
            .output(test_file.full_path)\
            .block_size(Size(1, Unit.KibiByte))\
            .count(1024)\
            .run()
        test_file.refresh_item()
        test_file_md5 = test_file.md5sum()
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Reset platform."):
        if reboot_type == "soft":
            TestRun.executor.reboot()
        else:
            power_control = TestRun.plugin_manager.get_plugin('power_control')
            power_control.power_cycle()

    with TestRun.step("Load cache."):
        casadm.load_cache(cache_dev)
        core.mount(mount_point)

    with TestRun.step("Check file md5sum."):
        test_file.refresh_item()
        if test_file_md5 != test_file.md5sum():
            TestRun.LOGGER.error(
                "Checksums does not match - file is corrupted.")
        else:
            TestRun.LOGGER.info("File checksum is correct.")

    with TestRun.step("Remove test file."):
        test_file.remove()
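# Note: the helper below appears to be a nested function lifted from a larger IO class
# test; it relies on size_to_class, cache, mountpoint and test_files from the enclosing scope.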
def create_files_and_check_classification():
    TestRun.LOGGER.info("Creating files belonging to different IO classes "
                        "(classification by writes).")
    for size, ioclass_id in size_to_class.items():
        occupancy_before = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
        file_path = f"{mountpoint}/test_file_{size.get_value()}"
        Dd().input("/dev/zero").output(file_path).oflag("sync").block_size(size).count(1).run()
        occupancy_after = cache.get_cache_statistics(io_class_id=ioclass_id)["occupancy"]
        if occupancy_after != occupancy_before + size:
            pytest.xfail("File not cached properly!\n"
                         f"Expected {occupancy_before + size}\n"
                         f"Actual {occupancy_after}")
        test_files.append(File(file_path).refresh_item())
    sync()
    drop_caches(DropCachesMode.ALL)
def test_ioclass_metadata(filesystem):
    """
        title: Metadata IO classification.
        description: |
          Determine if every operation on files that causes a metadata update results in
          increased writes to cached metadata.
        pass_criteria:
          - No kernel bug.
          - Metadata is classified properly.
    """
    # Exact values may not be tested as each file system has different metadata structure.
    test_dir_path = f"{mountpoint}/test_dir"

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Prepare and load IO class config file."):
        ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
        # metadata IO class
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule="metadata&done",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
                      f"at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Create 20 test files."):
        requests_to_metadata_before = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        files = []
        for i in range(1, 21):
            file_path = f"{mountpoint}/test_file_{i}"
            dd = (
                Dd().input("/dev/urandom")
                    .output(file_path)
                    .count(random.randint(5, 50))
                    .block_size(Size(1, Unit.MebiByte))
                    .oflag("sync")
            )
            dd.run()
            files.append(File(file_path))

    with TestRun.step("Check requests to metadata."):
        requests_to_metadata_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        if requests_to_metadata_after == requests_to_metadata_before:
            TestRun.fail("No requests to metadata while creating files!")

    with TestRun.step("Rename all test files."):
        requests_to_metadata_before = requests_to_metadata_after
        for file in files:
            file.move(f"{file.full_path}_renamed")
        sync()

    with TestRun.step("Check requests to metadata."):
        requests_to_metadata_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        if requests_to_metadata_after == requests_to_metadata_before:
            TestRun.fail("No requests to metadata while renaming files!")

    with TestRun.step(f"Create directory {test_dir_path}."):
        requests_to_metadata_before = requests_to_metadata_after
        fs_utils.create_directory(path=test_dir_path)

        TestRun.LOGGER.info(f"Moving test files into {test_dir_path}")
        for file in files:
            file.move(test_dir_path)
        sync()

    with TestRun.step("Check requests to metadata."):
        requests_to_metadata_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        if requests_to_metadata_after == requests_to_metadata_before:
            TestRun.fail("No requests to metadata while moving files!")

    with TestRun.step(f"Remove {test_dir_path}."):
        fs_utils.remove(path=test_dir_path, force=True, recursive=True)

    with TestRun.step("Check requests to metadata."):
        requests_to_metadata_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        if requests_to_metadata_after == requests_to_metadata_before:
            TestRun.fail("No requests to metadata while deleting directory with files!")
Example #13
def get_kernel_module_parameter(module_name, parameter):
    param_file_path = f"/sys/module/{module_name}/parameters/{parameter}"
    if not check_if_file_exists(param_file_path):
        raise FileNotFoundError(f"File {param_file_path} does not exist!")
    return File(param_file_path).read()
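# Hedged usage sketch: read a parameter of a loaded kernel module via sysfs. The module
# and parameter names below are placeholders; the helper raises FileNotFoundError when
# the corresponding sysfs entry does not exist.
def _log_module_parameter():
    value = get_kernel_module_parameter("cas_cache", "unaligned_io")
    TestRun.LOGGER.info(f"cas_cache unaligned_io = {value}")
    return value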
def test_ioclass_stats_sum(prepare_and_cleanup):
    """Check if stats for all set ioclasses sum up to cache stats"""
    cache, core = prepare()
    min_ioclass_id = 1
    max_ioclass_id = 11
    file_size_base = Unit.KibiByte.value * 4

    TestProperties.LOGGER.info("Preparing ioclass config file")
    ioclass_config.create_ioclass_config(
        add_default_rule=True, ioclass_config_path=ioclass_config_path
    )
    for i in range(min_ioclass_id, max_ioclass_id):
        ioclass_config.add_ioclass(
            ioclass_id=i,
            eviction_priority=22,
            allocation=True,
            rule=f"file_size:le:{file_size_base*i}&done",
            ioclass_config_path=ioclass_config_path,
        )
    cache.load_io_class(ioclass_config_path)

    TestProperties.LOGGER.info("Generating files with particular sizes")
    files_list = []
    for i in range(min_ioclass_id, max_ioclass_id):
        path = f"/tmp/test_file_{file_size_base*i}"
        File.create_file(path)
        f = File(path)
        f.padding(Size(file_size_base * i, Unit.Byte))
        files_list.append(f)

    core.create_filesystem(Filesystem.ext4)

    cache.reset_counters()

    # Name of stats, which should not be compared
    not_compare_stats = ["clean", "occupancy"]
    ioclass_id_list = list(range(min_ioclass_id, max_ioclass_id))
    # Append default ioclass id
    ioclass_id_list.append(0)
    TestProperties.LOGGER.info("Copying files to mounted core and stats check")
    for f in files_list:
        # To prevent stats pollution by filesystem requests, umount core device
        # after file is copied
        core.mount(mountpoint)
        f.copy(mountpoint)
        sync()
        core.unmount()
        sync()

        cache_stats = cache.get_cache_statistics(
            stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk]
        )
        for ioclass_id in ioclass_id_list:
            ioclass_stats = cache.get_cache_statistics(
                stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk],
                io_class_id=ioclass_id,
            )
            for stat_name in cache_stats:
                if stat_name in not_compare_stats:
                    continue
                cache_stats[stat_name] -= ioclass_stats[stat_name]

        for stat_name in cache_stats:
            if stat_name in not_compare_stats:
                continue
            stat_val = (
                cache_stats[stat_name].get_value()
                if isinstance(cache_stats[stat_name], Size)
                else cache_stats[stat_name]
            )
            assert stat_val == 0, f"{stat_name} diverged!\n"

    # Test cleanup
    for f in files_list:
        f.remove()
def test_cas_startup(cache_mode, filesystem):
    """
    title: Test for starting CAS on system startup.
    pass_criteria:
      - System does not crash.
      - CAS modules are loaded before partitions are mounted.
      - Cache is loaded before partitions are mounted.
      - Exported object is mounted after startup is complete.
    """
    with TestRun.step(
            "Prepare partitions for cache (200MiB) and for core (400MiB)"):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(200, Unit.MebiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(400, Unit.MebiByte)])
        core_part = core_dev.partitions[0]

    with TestRun.step("Start cache and add core"):
        cache = casadm.start_cache(cache_part, cache_mode, force=True)
        core = cache.add_core(core_part)

    with TestRun.step("Create and mount filesystem"):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)

    with TestRun.step("Create test file and calculate md5 checksum"):
        (Dd().input("/dev/urandom").output(filepath).count(16).block_size(
            Size(1, Unit.MebiByte)).run())
        test_file = File(filepath)
        md5_before = test_file.md5sum()

    with TestRun.step("Add mountpoint fstab and create intelcas.conf"):
        fstab.add_mountpoint(device=core,
                             mount_point=mountpoint,
                             fs_type=filesystem)
        InitConfig.create_init_config_from_running_configuration()

    with TestRun.step("Reboot"):
        TestRun.executor.reboot()

    with TestRun.step("Check if cache is started"):
        caches = list(get_caches())
        if len(caches) != 1:
            TestRun.fail(f"Expected one cache, got {len(caches)}!")
        if caches[0].cache_id != cache.cache_id:
            TestRun.fail("Invalid cache id!")

    with TestRun.step("Check if core is added"):
        cores = list(get_cores(cache.cache_id))
        if len(cores) != 1:
            TestRun.fail(f"Expected one core, got {len(cores)}!")
        if cores[0].core_id != core.core_id:
            TestRun.fail("Invalid core id!")

    with TestRun.step("Check if filesystem is mounted"):
        if not core.is_mounted():
            TestRun.fail("Core is not mounted!")

    with TestRun.step("Check if md5 checksum matches"):
        md5_after = test_file.md5sum()
        if md5_before != md5_after:
            TestRun.fail("md5 checksum mismatch!")

    with TestRun.step("Test cleanup"):
        fstab.remove_mountpoint(device=core)
        core.unmount()
        InitConfig.create_default_init_config()
        casadm.stop_all_caches()
def test_ioclass_directory_file_operations(filesystem):
    """
        title: Test IO classification by file operations.
        description: |
          Test if directory classification works properly after file operations like move or rename.
        pass_criteria:
          - No kernel bug.
          - The operations themselves should not cause reclassification but IO after those
            operations should be reclassified to proper IO class.
    """

    test_dir_path = f"{mountpoint}/test_dir"
    nested_dir_path = f"{test_dir_path}/nested_dir"
    dd_blocks = random.randint(5, 50)

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
        # directory IO class
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{test_dir_path}",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem "
                      f"and mounting {core.system_path} at {mountpoint}."):
        core.create_filesystem(fs_type=filesystem)
        core.mount(mount_point=mountpoint)
        sync()

    with TestRun.step(f"Create directory {nested_dir_path}."):
        Directory.create_directory(path=nested_dir_path, parents=True)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Create test file."):
        classified_before = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        file_path = f"{test_dir_path}/test_file"
        (Dd().input("/dev/urandom").output(file_path).oflag("sync").block_size(
            Size(1, Unit.MebiByte)).count(dd_blocks).run())
        sync()
        drop_caches(DropCachesMode.ALL)
        test_file = File(file_path).refresh_item()

    with TestRun.step("Check classified occupancy."):
        classified_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        check_occupancy(classified_before + test_file.size, classified_after)

    with TestRun.step("Move test file out of classified directory."):
        classified_before = classified_after
        non_classified_before = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        test_file.move(destination=mountpoint)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Check classified occupancy."):
        classified_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        check_occupancy(classified_before, classified_after)
        TestRun.LOGGER.info("Checking non-classified occupancy")
        non_classified_after = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        check_occupancy(non_classified_before, non_classified_after)

    with TestRun.step("Read test file."):
        classified_before = classified_after
        non_classified_before = non_classified_after
        (Dd().input(test_file.full_path).output("/dev/null").block_size(
            Size(1, Unit.MebiByte)).run())

    with TestRun.step("Check classified occupancy."):
        classified_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        check_occupancy(classified_before - test_file.size, classified_after)
        TestRun.LOGGER.info("Checking non-classified occupancy")
        non_classified_after = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        check_occupancy(non_classified_before + test_file.size,
                        non_classified_after)

    with TestRun.step(f"Move test file to {nested_dir_path}."):
        classified_before = classified_after
        non_classified_before = non_classified_after
        test_file.move(destination=nested_dir_path)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Check classified occupancy."):
        classified_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        check_occupancy(classified_before, classified_after)
        TestRun.LOGGER.info("Checking non-classified occupancy")
        non_classified_after = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        check_occupancy(non_classified_before, non_classified_after)

    with TestRun.step("Read test file."):
        classified_before = classified_after
        non_classified_before = non_classified_after
        (Dd().input(test_file.full_path).output("/dev/null").block_size(
            Size(1, Unit.MebiByte)).run())

    with TestRun.step("Check classified occupancy."):
        classified_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        check_occupancy(classified_before + test_file.size, classified_after)

    with TestRun.step("Check non-classified occupancy."):
        non_classified_after = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        check_occupancy(non_classified_before - test_file.size,
                        non_classified_after)
def test_ioclass_stats_sum(random_cls):
    """
        title: Test for sum of IO class statistics.
        description: |
          Check if statistics for configured IO classes sum up to cache/core statistics.
        pass_criteria:
          - Per class cache IO class statistics sum up to cache statistics.
          - Per class core IO class statistics sum up to core statistics.
    """

    min_ioclass_id = 1
    max_ioclass_id = 11
    file_size_base = Unit.Blocks4096.value

    with TestRun.step("Test prepare"):
        caches, cores = prepare(random_cls)
        cache, core = caches[0], cores[0]

    with TestRun.step("Prepare IO class config file"):
        ioclass_list = []
        for class_id in range(min_ioclass_id, max_ioclass_id):
            ioclass_list.append(
                IoClass(class_id=class_id,
                        rule=f"file_size:le:{file_size_base * class_id}&done",
                        priority=22))
        IoClass.save_list_to_config_file(ioclass_list, True)

    with TestRun.step("Load IO class config file"):
        cache.load_io_class(ioclass_config.default_config_file_path)

    with TestRun.step(
            "Generate files with particular sizes in temporary folder"):
        files_list = []
        for class_id in range(min_ioclass_id, max_ioclass_id):
            path = f"/tmp/test_file_{file_size_base * class_id}"
            File.create_file(path)
            f = File(path)
            f.padding(Size(file_size_base * class_id, Unit.Byte))
            files_list.append(f)

    with TestRun.step("Copy files to mounted core"):
        core.mount(mountpoint)
        for f in files_list:
            TestRun.LOGGER.info(f"Copying file {f.name} to mounted core")
            f.copy(mountpoint)
            sync()
        # To prevent stats pollution by filesystem requests, umount core device
        # after files are copied
        core.unmount()
        sync()

    with TestRun.step(
            "Check if per class cache IO class statistics sum up to cache statistics"
    ):
        # Name of stats, which should not be compared
        not_compare_stats = ["clean", "occupancy", "free"]
        ioclass_id_list = list(range(min_ioclass_id, max_ioclass_id))
        # Append default IO class id
        ioclass_id_list.append(0)

        cache_stats = cache.get_statistics_flat(
            stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk])
        for ioclass_id in ioclass_id_list:
            ioclass_stats = cache.get_statistics_flat(
                stat_filter=[
                    StatsFilter.usage, StatsFilter.req, StatsFilter.blk
                ],
                io_class_id=ioclass_id,
            )
            for stat_name in cache_stats:
                if stat_name in not_compare_stats:
                    continue
                cache_stats[stat_name] -= ioclass_stats[stat_name]

        for stat_name in cache_stats:
            if stat_name in not_compare_stats:
                continue
            stat_val = (cache_stats[stat_name].get_value() if isinstance(
                cache_stats[stat_name], Size) else cache_stats[stat_name])
            if stat_val != 0:
                TestRun.LOGGER.error(f"{stat_name} diverged for cache!\n")

    with TestRun.step(
            "Check if per class core IO class statistics sum up to core statistics"
    ):
        core_stats = core.get_statistics_flat(
            stat_filter=[StatsFilter.usage, StatsFilter.req, StatsFilter.blk])
        for ioclass_id in ioclass_id_list:
            ioclass_stats = core.get_statistics_flat(
                stat_filter=[
                    StatsFilter.usage, StatsFilter.req, StatsFilter.blk
                ],
                io_class_id=ioclass_id,
            )
            for stat_name in core_stats:
                if stat_name in not_compare_stats:
                    continue
                core_stats[stat_name] -= ioclass_stats[stat_name]

        for stat_name in core_stats:
            if stat_name in not_compare_stats:
                continue
            stat_val = (core_stats[stat_name].get_value() if isinstance(
                core_stats[stat_name], Size) else core_stats[stat_name])
            if stat_val != 0:
                TestRun.LOGGER.error(f"{stat_name} diverged for core!\n")

        with TestRun.step("Test cleanup"):
            for f in files_list:
                f.remove()
def test_ioclass_directory_depth(filesystem):
    """
        title: Test IO classification by directory.
        description: |
          Test if directory classification works properly for deeply nested directories for read and
          write operations.
        pass_criteria:
          - No kernel bug.
          - Read and write operations to directories are classified properly.
    """
    base_dir_path = f"{mountpoint}/base_dir"

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step(
            f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
            f"at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step(f"Create the base directory: {base_dir_path}."):
        fs_utils.create_directory(base_dir_path)

    with TestRun.step(f"Create a nested directory."):
        nested_dir_path = base_dir_path
        random_depth = random.randint(40, 80)
        for i in range(random_depth):
            nested_dir_path += f"/dir_{i}"
        fs_utils.create_directory(path=nested_dir_path, parents=True)

    # Test classification in nested dir by reading a previously unclassified file
    with TestRun.step("Create the first file in the nested directory."):
        test_file_1 = File(f"{nested_dir_path}/test_file_1")
        dd = (Dd().input("/dev/urandom").output(test_file_1.full_path).count(
            random.randint(1, 200)).block_size(Size(1, Unit.MebiByte)))
        dd.run()
        sync()
        drop_caches(DropCachesMode.ALL)
        test_file_1.refresh_item()

    with TestRun.step("Load IO class config."):
        ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
        # directory IO class
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{base_dir_path}",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Read the file in the nested directory"):
        base_occupancy = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        dd = (Dd().input(test_file_1.full_path).output("/dev/null").block_size(
            Size(1, Unit.MebiByte)))
        dd.run()

    with TestRun.step("Check occupancy after creating the file."):
        new_occupancy = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        if new_occupancy != base_occupancy + test_file_1.size:
            TestRun.LOGGER.error(
                "Wrong occupancy after reading file!\n"
                "Expected: {base_occupancy + test_file_1.size}, "
                f"actual: {new_occupancy}")

    # Test classification in nested dir by creating a file
    with TestRun.step("Create the second file in the nested directory"):
        base_occupancy = new_occupancy
        test_file_2 = File(f"{nested_dir_path}/test_file_2")
        dd = (Dd().input("/dev/urandom").output(test_file_2.full_path).count(
            random.randint(1, 200)).block_size(Size(1, Unit.MebiByte)))
        dd.run()
        sync()
        drop_caches(DropCachesMode.ALL)
        test_file_2.refresh_item()

    with TestRun.step("Check occupancy after creating the second file."):
        new_occupancy = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        if new_occupancy != base_occupancy + test_file_2.size:
            TestRun.LOGGER.error(
                "Wrong occupancy after creating file!\n"
                f"Expected: {base_occupancy + test_file_2.size}, "
                f"actual: {new_occupancy}")
def test_ioclass_file_size(filesystem):
    """
        title: Test IO classification by file size.
        description: Test if file size classification works properly.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on IO class rule with file size.
    """

    # File size IO class rules are configured in a way that each tested file size is unambiguously
    # classified.
    # Firstly write operations are tested (creation of files), secondly read operations.

    base_size = Size(random.randint(50, 1000) * 2, Unit.Blocks4096)
    size_to_class = {
        base_size: 1,
        base_size - Unit.Blocks4096: 2,
        base_size + Unit.Blocks4096: 3,
        base_size / 2: 4,
        base_size / 2 - Unit.Blocks4096: 4,
        base_size / 2 + Unit.Blocks4096: 2,
        base_size * 2: 5,
        base_size * 2 - Unit.Blocks4096: 3,
        base_size * 2 + Unit.Blocks4096: 5,
    }

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare(default_allocation="1.00")

    with TestRun.step("Prepare and load IO class config."):
        load_file_size_io_classes(cache, base_size)

    with TestRun.step(
            f"Prepare {filesystem.name} filesystem and mount {core.path} "
            f"at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step(
            "Create files belonging to different IO classes (classification by writes)."
    ):
        test_files = []
        for size, ioclass_id in size_to_class.items():
            occupancy_before = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.occupancy
            file_path = f"{mountpoint}/test_file_{size.get_value()}"
            Dd().input("/dev/zero").output(file_path).oflag("sync").block_size(
                size).count(1).run()
            sync()
            drop_caches(DropCachesMode.ALL)
            occupancy_after = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.occupancy
            if occupancy_after != occupancy_before + size:
                TestRun.fail("File not cached properly!\n"
                             f"Expected {occupancy_before + size}\n"
                             f"Actual {occupancy_after}")
            test_files.append(File(file_path).refresh_item())
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Move all files to 'unclassified' IO class."):
        ioclass_config.remove_ioclass_config(
            ioclass_config_path=ioclass_config_path)
        ioclass_config.create_ioclass_config(
            add_default_rule=False, ioclass_config_path=ioclass_config_path)
        ioclass_config.add_ioclass(
            ioclass_id=0,
            eviction_priority=22,
            allocation="1.00",
            rule="unclassified",
            ioclass_config_path=ioclass_config_path,
        )
        ioclass_config.add_ioclass(
            ioclass_id=6,
            eviction_priority=1,
            allocation="0.00",
            rule=f"metadata",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)
        occupancy_before = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        for file in test_files:
            Dd().input(file.full_path).output("/dev/null").block_size(
                file.size).run()
            sync()
            drop_caches(DropCachesMode.ALL)
            occupancy_after = cache.get_io_class_statistics(
                io_class_id=0).usage_stats.occupancy
            occupancy_expected = occupancy_before + file.size
            if occupancy_after != occupancy_expected:
                TestRun.fail("File not reclassified properly!\n"
                             f"Expected {occupancy_expected}\n"
                             f"Actual {occupancy_after}")
            occupancy_before = occupancy_after
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Restore IO class configuration."):
        ioclass_config.remove_ioclass_config(
            ioclass_config_path=ioclass_config_path)
        ioclass_config.create_ioclass_config(
            add_default_rule=False, ioclass_config_path=ioclass_config_path)
        ioclass_config.add_ioclass(
            ioclass_id=0,
            eviction_priority=22,
            allocation="1.00",
            rule="unclassified",
            ioclass_config_path=ioclass_config_path,
        )
        load_file_size_io_classes(cache, base_size)

    with TestRun.step(
            "Read files belonging to different IO classes (classification by reads)."
    ):
        # The CAS device should be unmounted and mounted again because data may still be
        # cached by the OS page cache, which would skew the occupancy statistics
        core.unmount()
        core.mount(mountpoint)
        for file in test_files:
            ioclass_id = size_to_class[file.size]
            occupancy_before = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.occupancy
            Dd().input(file.full_path).output("/dev/null").block_size(
                file.size).run()
            sync()
            drop_caches(DropCachesMode.ALL)
            occupancy_after = cache.get_io_class_statistics(
                io_class_id=ioclass_id).usage_stats.occupancy
            actual_blocks = occupancy_after.get_value(Unit.Blocks4096)
            expected_blocks = (occupancy_before + file.size).get_value(
                Unit.Blocks4096)
            if actual_blocks != expected_blocks:
                TestRun.fail("File not reclassified properly!\n"
                             f"Expected {occupancy_before + file.size}\n"
                             f"Actual {occupancy_after}")
        sync()
        drop_caches(DropCachesMode.ALL)
def test_ioclass_metadata(filesystem):
    """
    Perform operations on files that cause metadata update.
    Determine if every such operation results in increased writes to cached metadata.
    Exact values may not be tested as each file system has different metadata structure.
    """
    cache, core = prepare()
    Udev.disable()

    ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
    # metadata IO class
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule="metadata&done",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(filesystem)
    core.mount(mountpoint)
    sync()

    requests_to_metadata_before = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["write total"]
    TestRun.LOGGER.info("Creating 20 test files")
    files = []
    for i in range(1, 21):
        file_path = f"{mountpoint}/test_file_{i}"
        dd = (Dd().input("/dev/urandom").output(file_path).count(
            random.randint(5,
                           50)).block_size(Size(1,
                                                Unit.MebiByte)).oflag("sync"))
        dd.run()
        files.append(File(file_path))

    TestRun.LOGGER.info("Checking requests to metadata")
    requests_to_metadata_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["write total"]
    if requests_to_metadata_after == requests_to_metadata_before:
        pytest.xfail("No requests to metadata while creating files!")

    requests_to_metadata_before = requests_to_metadata_after
    TestRun.LOGGER.info("Renaming all test files")
    for file in files:
        file.move(f"{file.full_path}_renamed")
    sync()

    TestRun.LOGGER.info("Checking requests to metadata")
    requests_to_metadata_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["write total"]
    if requests_to_metadata_after == requests_to_metadata_before:
        pytest.xfail("No requests to metadata while renaming files!")

    requests_to_metadata_before = requests_to_metadata_after
    test_dir_path = f"{mountpoint}/test_dir"
    TestRun.LOGGER.info(f"Creating directory {test_dir_path}")
    fs_utils.create_directory(path=test_dir_path)

    TestRun.LOGGER.info(f"Moving test files into {test_dir_path}")
    for file in files:
        file.move(test_dir_path)
    sync()

    TestRun.LOGGER.info("Checking requests to metadata")
    requests_to_metadata_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["write total"]
    if requests_to_metadata_after == requests_to_metadata_before:
        pytest.xfail("No requests to metadata while moving files!")

    TestRun.LOGGER.info(f"Removing {test_dir_path}")
    fs_utils.remove(path=test_dir_path, force=True, recursive=True)

    TestRun.LOGGER.info("Checking requests to metadata")
    requests_to_metadata_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["write total"]
    if requests_to_metadata_after == requests_to_metadata_before:
        pytest.xfail(
            "No requests to metadata while deleting directory with files!")
Example #21
def test_recovery_all_options(cache_mode, cache_line_size, cleaning_policy,
                              filesystem):
    """
        title: Test for recovery after reset with various cache options.
        description: Verify that unflushed data can be safely recovered after reset.
        pass_criteria:
          - CAS recovers successfully after reboot
          - No data corruption
    """
    with TestRun.step("Prepare cache and core devices."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        cache_disk.create_partitions([Size(200, Unit.MebiByte)])
        core_disk.create_partitions([Size(2000, Unit.MebiByte)] * 2)
        cache_device = cache_disk.partitions[0]
        core_device = core_disk.partitions[0]

        test_file = File(os.path.join(mount_point, filename))
        file_operation(test_file.full_path, pattern, ReadWrite.write)
        file_md5 = test_file.md5sum()

    with TestRun.step(f"Make {filesystem} on core device."):
        core_device.create_filesystem(filesystem)

    with TestRun.step("Mount core device."):
        core_device.mount(mount_point)
        file_operation(test_file.full_path, other_pattern, ReadWrite.write)
        os_utils.drop_caches(DropCachesMode.ALL)

    with TestRun.step("Unmount core device."):
        core_device.unmount()

    with TestRun.step(
            f"Start cache in {cache_mode.name} with given configuration."):
        cache = casadm.start_cache(cache_device,
                                   cache_mode,
                                   cache_line_size,
                                   force=True)
        cache.set_cleaning_policy(cleaning_policy)
        if cleaning_policy == CleaningPolicy.acp:
            cache.set_params_acp(
                FlushParametersAcp(wake_up_time=Time(seconds=1)))

    with TestRun.step("Add core."):
        core = cache.add_core(core_device)

    with TestRun.step("Mount CAS device."):
        core.mount(mount_point)
        file_operation(test_file.full_path, pattern, ReadWrite.write)

    with TestRun.step(
            "Change cache mode to Write-Through without flush option."):
        cache.set_cache_mode(CacheMode.WT, flush=False)

    with TestRun.step("Reset platform."):
        os_utils.sync()
        core.unmount()
        TestRun.LOGGER.info(
            f"Number of dirty blocks in cache: {cache.get_dirty_blocks()}")
        power_cycle_dut()

    with TestRun.step("Try to start cache without load and force option."):
        try:
            casadm.start_cache(cache_device, cache_mode, cache_line_size)
            TestRun.fail("Cache started without load or force option.")
        except Exception:
            TestRun.LOGGER.info(
                "Cache did not start without load and force option.")

    with TestRun.step("Load cache and stop it with flush."):
        cache = casadm.load_cache(cache_device)
        cache.stop()

    with TestRun.step("Check md5sum of tested file on core device."):
        core_device.mount(mount_point)
        cas_md5 = test_file.md5sum()
        core_device.unmount()
        if cas_md5 == file_md5:
            TestRun.LOGGER.info(
                "Source and target file checksums are identical.")
        else:
            TestRun.fail("Source and target file checksums are different.")
Example #22
def test_data_integrity_5d_dss(filesystems):
    """
        title: |
          Data integrity test on four CAS instances with different
          file systems with duration time equal to 5 days
        description: |
          Create 4 cache instances on caches equal to 50GB and cores equal to 150GB
          with different file systems, and run workload with data verification.
        pass_criteria:
            - System does not crash.
            - All operations complete successfully.
            - Data consistency is preserved.
    """
    with TestRun.step("Prepare cache and core devices"):
        cache_devices, core_devices = prepare_devices()

    with TestRun.step(
            "Run 4 cache instances in different cache modes, add single core to each"
    ):
        cache_modes = [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WO]
        caches = []
        cores = []
        for i in range(4):
            cache, core = start_instance(cache_devices[i], core_devices[i],
                                         cache_modes[i])
            caches.append(cache)
            cores.append(core)

    with TestRun.step("Load default io class config for each cache"):
        for cache in caches:
            cache.load_io_class("/etc/opencas/ioclass-config.csv")

    with TestRun.step("Create filesystems and mount cores"):
        for i, core in enumerate(cores):
            mount_point = core.path.replace('/dev/', '/mnt/')
            if not fs_utils.check_if_directory_exists(mount_point):
                fs_utils.create_directory(mount_point)
            TestRun.LOGGER.info(
                f"Create filesystem {filesystems[i].name} on {core.path}")
            core.create_filesystem(filesystems[i])
            TestRun.LOGGER.info(
                f"Mount filesystem {filesystems[i].name} on {core.path} to "
                f"{mount_point}")
            core.mount(mount_point)
            sync()

    with TestRun.step("Run test workloads on filesystems with verification"):
        fio_run = Fio().create_command()
        fio_run.io_engine(IoEngine.libaio)
        fio_run.direct()
        fio_run.time_based()
        fio_run.nr_files(4096)
        fio_run.file_size_range([(file_min_size, file_max_size)])
        fio_run.do_verify()
        fio_run.verify(VerifyMethod.md5)
        fio_run.verify_dump()
        fio_run.run_time(runtime)
        fio_run.read_write(ReadWrite.randrw)
        fio_run.io_depth(128)
        fio_run.blocksize_range([(start_size, stop_size)])
        for core in cores:
            fio_job = fio_run.add_job()
            fio_job.directory(core.mount_point)
            fio_job.size(core.size)
        fio_run.run()

    with TestRun.step("Unmount cores"):
        for core in cores:
            core.unmount()

    with TestRun.step("Calculate md5 for each core"):
        core_md5s = [File(core.full_path).md5sum() for core in cores]

    with TestRun.step("Stop caches"):
        for cache in caches:
            cache.stop()

    with TestRun.step("Calculate md5 for each core"):
        dev_md5s = [File(dev.full_path).md5sum() for dev in core_devices]

    with TestRun.step("Compare md5 sums for cores and core devices"):
        for core_md5, dev_md5, mode, fs in zip(core_md5s, dev_md5s,
                                               cache_modes, filesystems):
            if core_md5 != dev_md5:
                TestRun.fail(f"MD5 sums of core and core device do not match! "
                             f"Cache mode: {mode} Filesystem: {fs}")
def test_ioclass_directory_file_operations(filesystem):
    """
    Test if directory classification works properly after file operations like move or rename.
    The operations themselves should not cause reclassification but IO after those operations
    should be reclassified to proper IO class.
    """
    def check_occupancy(expected: Size, actual: Size):
        if expected != actual:
            pytest.xfail("Occupancy check failed!\n"
                         f"Expected: {expected}, actual: {actual}")

    cache, core = prepare()
    Udev.disable()
    test_dir_path = f"{mountpoint}/test_dir"
    nested_dir_path = f"{test_dir_path}/nested_dir"

    dd_blocks = random.randint(5, 50)

    ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
    # directory IO class
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=f"directory:{test_dir_path}",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(fs_type=filesystem)
    core.mount(mount_point=mountpoint)
    sync()

    TestRun.LOGGER.info(f"Creating directory {nested_dir_path}")
    Directory.create_directory(path=nested_dir_path, parents=True)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info("Creating test file")
    classified_before = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    file_path = f"{test_dir_path}/test_file"
    (Dd().input("/dev/urandom").output(file_path).oflag("sync").block_size(
        Size(1, Unit.MebiByte)).count(dd_blocks).run())
    sync()
    drop_caches(DropCachesMode.ALL)
    test_file = File(file_path).refresh_item()

    TestRun.LOGGER.info("Checking classified occupancy")
    classified_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    check_occupancy(classified_before + test_file.size, classified_after)

    TestRun.LOGGER.info("Moving test file out of classified directory")
    classified_before = classified_after
    non_classified_before = cache.get_statistics_deprecated(
        io_class_id=0)["occupancy"]
    test_file.move(destination=mountpoint)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info("Checking classified occupancy")
    classified_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    check_occupancy(classified_before, classified_after)
    TestRun.LOGGER.info("Checking non-classified occupancy")
    non_classified_after = cache.get_statistics_deprecated(
        io_class_id=0)["occupancy"]
    check_occupancy(non_classified_before, non_classified_after)

    TestRun.LOGGER.info("Reading test file")
    classified_before = classified_after
    non_classified_before = non_classified_after
    (Dd().input(test_file.full_path).output("/dev/null").block_size(
        Size(1, Unit.MebiByte)).run())

    TestRun.LOGGER.info("Checking classified occupancy")
    classified_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    check_occupancy(classified_before - test_file.size, classified_after)
    TestRun.LOGGER.info("Checking non-classified occupancy")
    non_classified_after = cache.get_statistics_deprecated(
        io_class_id=0)["occupancy"]
    check_occupancy(non_classified_before + test_file.size,
                    non_classified_after)

    TestRun.LOGGER.info(f"Moving test file to {nested_dir_path}")
    classified_before = classified_after
    non_classified_before = non_classified_after
    test_file.move(destination=nested_dir_path)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info("Checking classified occupancy")
    classified_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    check_occupancy(classified_before, classified_after)
    TestRun.LOGGER.info("Checking non-classified occupancy")
    non_classified_after = cache.get_statistics_deprecated(
        io_class_id=0)["occupancy"]
    check_occupancy(non_classified_before, non_classified_after)

    TestRun.LOGGER.info("Reading test file")
    classified_before = classified_after
    non_classified_before = non_classified_after
    (Dd().input(test_file.full_path).output("/dev/null").block_size(
        Size(1, Unit.MebiByte)).run())

    TestRun.LOGGER.info("Checking classified occupancy")
    classified_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    check_occupancy(classified_before + test_file.size, classified_after)
    TestRun.LOGGER.info("Checking non-classified occupancy")
    non_classified_after = cache.get_statistics_deprecated(
        io_class_id=0)["occupancy"]
    check_occupancy(non_classified_before - test_file.size,
                    non_classified_after)
def test_ioclass_directory_depth(filesystem):
    """
    Test if directory classification works properly for deeply nested directories for read and
    write operations.
    """
    cache, core = prepare()
    Udev.disable()

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(filesystem)
    core.mount(mountpoint)
    sync()

    base_dir_path = f"{mountpoint}/base_dir"
    TestRun.LOGGER.info(f"Creating the base directory: {base_dir_path}")
    fs_utils.create_directory(base_dir_path)

    nested_dir_path = base_dir_path
    random_depth = random.randint(40, 80)
    for i in range(random_depth):
        nested_dir_path += f"/dir_{i}"
    TestRun.LOGGER.info(f"Creating a nested directory: {nested_dir_path}")
    fs_utils.create_directory(path=nested_dir_path, parents=True)

    # Test classification in nested dir by reading a previously unclassified file
    TestRun.LOGGER.info("Creating the first file in the nested directory")
    test_file_1 = File(f"{nested_dir_path}/test_file_1")
    dd = (Dd().input("/dev/urandom").output(test_file_1.full_path).count(
        random.randint(1, 200)).block_size(Size(1, Unit.MebiByte)))
    dd.run()
    sync()
    drop_caches(DropCachesMode.ALL)
    test_file_1.refresh_item()

    ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
    # directory IO class
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=f"directory:{base_dir_path}",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    base_occupancy = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    TestRun.LOGGER.info("Reading the file in the nested directory")
    dd = (Dd().input(test_file_1.full_path).output("/dev/null").block_size(
        Size(1, Unit.MebiByte)))
    dd.run()

    new_occupancy = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    assert new_occupancy == base_occupancy + test_file_1.size, \
        "Wrong occupancy after reading file!\n" \
        f"Expected: {base_occupancy + test_file_1.size}, actual: {new_occupancy}"

    # Test classification in nested dir by creating a file
    base_occupancy = new_occupancy
    TestRun.LOGGER.info("Creating the second file in the nested directory")
    test_file_2 = File(f"{nested_dir_path}/test_file_2")
    dd = (Dd().input("/dev/urandom").output(test_file_2.full_path).count(
        random.randint(1, 200)).block_size(Size(1, Unit.MebiByte)))
    dd.run()
    sync()
    drop_caches(DropCachesMode.ALL)
    test_file_2.refresh_item()

    new_occupancy = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    assert new_occupancy == base_occupancy + test_file_2.size, \
        "Wrong occupancy after creating file!\n" \
        f"Expected: {base_occupancy + test_file_2.size}, actual: {new_occupancy}"