Example #1
0
def test_trim_eviction(cache_mode, cache_line_size, filesystem, cleaning):
    """
        title: Test verifying if trim requests do not cause eviction on CAS device.
        description: |
          When trim requests enabled and files are being added and removed from CAS device,
          there is no eviction (no reads from cache).
        pass_criteria:
          - Reads from cache device are the same before and after removing test file.
    """
    mount_point = "/mnt"
    test_file_path = os.path.join(mount_point, "test_file")

    with TestRun.step("Prepare devices."):
        # Equal-sized 1 GiB cache and core partitions.
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]

        core_disk = TestRun.disks['core']
        core_disk.create_partitions([Size(1, Unit.GibiByte)])
        core_dev = core_disk.partitions[0]

    with TestRun.step("Start cache on device supporting trim and add core."):
        cache = casadm.start_cache(cache_dev, cache_mode, cache_line_size)
        cache.set_cleaning_policy(cleaning)
        # NOTE(review): udev is disabled, presumably so automatic device events
        # do not generate extra IO that would distort cache statistics — confirm.
        Udev.disable()
        core = cache.add_core(core_dev)

    with TestRun.step("Create filesystem on CAS device and mount it."):
        core.create_filesystem(filesystem)
        # "discard" mount option makes the filesystem issue trim requests on file removal.
        core.mount(mount_point, ["discard"])

    with TestRun.step("Create random file using ddrescue."):
        # File spans ~90% of the core so any eviction-triggered cache reads are visible.
        test_file = fs_utils.create_random_test_file(test_file_path,
                                                     core_dev.size * 0.9)
        create_file_with_ddrescue(core_dev, test_file)

    with TestRun.step("Remove file and create a new one."):
        cache_iostats_before = cache_dev.get_io_stats()
        test_file.remove()
        # Flush page cache so subsequent IO actually reaches the devices.
        os_utils.sync()
        os_utils.drop_caches()
        create_file_with_ddrescue(core_dev, test_file)

    with TestRun.step(
            "Check using iostat that reads from cache did not occur."):
        cache_iostats_after = cache_dev.get_io_stats()
        reads_before = cache_iostats_before.sectors_read
        reads_after = cache_iostats_after.sectors_read

        # Any additional read from the cache device would indicate trim-caused eviction.
        if reads_after != reads_before:
            TestRun.fail(
                f"Number of reads from cache before and after removing test file "
                f"differs. Reads before: {reads_before}, reads after: {reads_after}."
            )
        else:
            TestRun.LOGGER.info(
                "Number of reads from cache before and after removing test file is the same."
            )
Example #2
0
def test_load_cache_with_mounted_core(cache_mode):
    """
        title: Fault injection test for adding mounted core on cache load.
        description: |
          Negative test of the ability of CAS to add to cache while its loading
          core device which is mounted.
        pass_criteria:
          - No system crash while loading cache.
          - Adding mounted core while loading cache fails.
    """
    with TestRun.step("Prepare cache and core devices. Start CAS."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(1, Unit.GibiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(4, Unit.GibiByte)])
        core_part = core_dev.partitions[0]
        cache = casadm.start_cache(cache_part, cache_mode, force=True)

    with TestRun.step("Add core device with xfs filesystem to cache and mount it."):
        core_part.create_filesystem(Filesystem.xfs)
        core = cache.add_core(core_part)
        core.mount(mount_point)

    # Fix: dropped pointless f-prefix (no placeholders in the step description).
    with TestRun.step("Create test file in mount point of exported object and check its md5 sum."):
        test_file = fs_utils.create_random_test_file(test_file_path)
        test_file_md5_before = test_file.md5sum()

    with TestRun.step("Unmount core device."):
        core.unmount()

    with TestRun.step("Stop cache."):
        cache.stop()
        caches_count = len(casadm_parser.get_caches())
        if caches_count != 0:
            TestRun.fail(f"Expected caches count: 0; Actual caches count: {caches_count}.")

    with TestRun.step("Mount core device."):
        # Mount the raw core partition (not the exported object) to set up the fault.
        core_part.mount(mount_point)

    with TestRun.step("Try to load cache."):
        cache = casadm.load_cache(cache.cache_device)
        caches_count = len(casadm_parser.get_caches())
        if caches_count != 1:
            # Fix: message made consistent with the other count checks ("; " separator).
            TestRun.fail(f"Expected caches count: 1; Actual caches count: {caches_count}.")
        # The mounted core must NOT have been attached during load.
        cores_count = len(casadm_parser.get_cores(cache.cache_id))
        if cores_count != 0:
            TestRun.fail(f"Expected cores count: 0; Actual cores count: {cores_count}.")

    with TestRun.step("Check md5 sum of test file again."):
        if test_file_md5_before != test_file.md5sum():
            TestRun.LOGGER.error("Md5 sum of test file is different.")
        core_part.unmount()

    with TestRun.step("Stop cache."):
        casadm.stop_all_caches()
def prepare_with_file_creation(config):
    """Start a cache with the given config, attach an ext3 core and seed one random file.

    Returns a tuple of (cache, core, file, md5sum); the core is left unmounted.
    """
    cache_device, core_device = prepare()
    cache = casadm.start_cache(cache_device, config, force=True)
    core = cache.add_core(core_device)
    core.create_filesystem(Filesystem.ext3)
    core.mount(mount_point)
    seeded_file = fs_utils.create_random_test_file(test_file_path)
    checksum = seeded_file.md5sum()
    core.unmount()
    return cache, core, seeded_file, checksum
def create_files_with_md5sums(destination_path, files_count):
    """Create `files_count` random files under /tmp, copy each to `destination_path`,
    and return their md5 sums (computed on the /tmp source, in creation order).
    """
    md5sums = []  # fix: literal instead of list() call
    for i in range(files_count):  # fix: drop redundant 0 start
        temp_file = f"/tmp/file{i}"
        destination_file = f"{destination_path}/file{i}"

        test_file = fs_utils.create_random_test_file(temp_file, test_file_size)
        test_file.copy(destination_file, force=True)

        md5sums.append(test_file.md5sum())

    # fix: removed extraneous f-prefix (message has no placeholders)
    TestRun.LOGGER.info("Files created and copied to core successfully.")
    return md5sums
def test_raid_as_cache(cache_mode):
    """
        title: Test if SW RAID1 can be a cache device.
        description: |
          Test if SW RAID1 can be a cache for CAS device.
        pass_criteria:
          - Successful creation of RAID and building CAS device with it.
          - Files copied successfully, the md5sum match the origin one.
    """
    with TestRun.step("Create RAID1."):
        raid_disk = TestRun.disks['cache1']
        raid_disk.create_partitions([Size(2, Unit.GibiByte)])
        raid_disk_1 = raid_disk.partitions[0]
        raid_disk2 = TestRun.disks['cache2']
        raid_disk2.create_partitions([Size(2, Unit.GibiByte)])
        raid_disk_2 = raid_disk2.partitions[0]

        # Mirrored volume over the two partitions with the legacy metadata variant.
        config = RaidConfiguration(
            level=Level.Raid1,
            metadata=MetadataVariant.Legacy,
            number_of_devices=2)

        raid_volume = Raid.create(config, [raid_disk_1, raid_disk_2])
        # fix: removed extraneous f-prefix (no placeholders)
        TestRun.LOGGER.info("RAID created successfully.")

    with TestRun.step("Prepare core device."):
        core_disk = TestRun.disks['core']
        core_disk.create_partitions([Size(2, Unit.GibiByte)])
        core_dev = core_disk.partitions[0]

    with TestRun.step("Create CAS device with RAID1 as cache."):
        cache = casadm.start_cache(raid_volume, cache_mode, force=True)
        core = cache.add_core(core_dev)

        core.create_filesystem(Filesystem.ext3)
        core.mount(mount_point)

    with TestRun.step("Copy files to cache and check md5sum."):
        for i in range(files_number):  # fix: drop redundant 0 start
            test_file = fs_utils.create_random_test_file(test_file_tmp_path, test_file_size)
            test_file_copied = test_file.copy(test_file_path, force=True)

            if test_file.md5sum() != test_file_copied.md5sum():
                TestRun.LOGGER.error("Checksums are different.")

            # Clean up both copies so each iteration starts fresh.
            fs_utils.remove(test_file.full_path, True)
            fs_utils.remove(test_file_copied.full_path, True)

        # fix: removed extraneous f-prefix (no placeholders)
        TestRun.LOGGER.info("Successful verification.")
def create_test_files(test_file_size):
    """Prepare a random source file and an empty target file in /tmp.

    Returns the (source_file, target_file) pair.
    """
    source = fs_utils.create_random_test_file("/tmp/source_test_file", test_file_size)
    target = File.create_file("/tmp/target_test_file")
    return source, target
Example #7
0
def test_clean_stop_cache(cache_mode):
    """
        title: Test of the ability to stop cache in modes with lazy writes.
        description: |
          Test if OpenCAS stops cache in modes with lazy writes without data loss.
        pass_criteria:
          - Cache stopping works properly.
          - Writes to exported object and core device during OpenCAS's work are equal
          - Data on core device is correct after cache is stopped.
    """
    with TestRun.step("Prepare devices for cache and core."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(256, Unit.MebiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(512, Unit.MebiByte)])
        core_part = core_dev.partitions[0]
        Udev.disable()

    with TestRun.step(f"Start cache in {cache_mode} mode."):
        cache = casadm.start_cache(cache_part, cache_mode)

    with TestRun.step("Add core to cache."):
        core = cache.add_core(core_part)

    with TestRun.step("Disable cleaning and sequential cutoff."):
        # NOP cleaning and no sequential cutoff keep written data dirty in cache,
        # so nothing reaches the core device until the cache is stopped.
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Read IO stats before test"):
        core_disk_writes_initial = check_device_write_stats(core_part)
        exp_obj_writes_initial = check_device_write_stats(core)

    with TestRun.step("Write data to the exported object."):
        test_file_main = create_random_test_file("/tmp/test_file_main", Size(64, Unit.MebiByte))
        dd = Dd().output(core.system_path) \
            .input(test_file_main.full_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_md5sum_main = test_file_main.md5sum()

    with TestRun.step("Read IO stats after write to the exported object."):
        core_disk_writes_increase = (
            check_device_write_stats(core_part) - core_disk_writes_initial
        )
        exp_obj_writes_increase = (
            check_device_write_stats(core) - exp_obj_writes_initial
        )

    with TestRun.step("Validate IO stats after write to the exported object."):
        # With lazy writes the raw core device must not see any of the writes yet.
        if core_disk_writes_increase > 0:
            TestRun.LOGGER.error("Writes should occur only on the exported object.")
        if exp_obj_writes_increase != test_file_main.size.value:
            TestRun.LOGGER.error("Not all writes reached the exported object.")

    with TestRun.step("Read data from the exported object."):
        test_file_1 = File.create_file("/tmp/test_file_1")
        dd = Dd().output(test_file_1.full_path) \
            .input(core.system_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_1.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        if test_file_md5sum_main != test_file_1.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Read data from the core device."):
        test_file_2 = File.create_file("/tmp/test_file_2")
        dd = Dd().output(test_file_2.full_path) \
            .input(core_part.system_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_2.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        # Data is still dirty in cache, so the raw core must NOT match yet.
        if test_file_md5sum_main == test_file_2.md5sum():
            TestRun.LOGGER.error("Md5 sums should be different.")

    with TestRun.step("Read IO stats before stopping cache."):
        core_disk_writes_before_stop = check_device_write_stats(core_part)

    with TestRun.step("Stop cache."):
        # Stopping the cache flushes all dirty data down to the core device.
        cache.stop()

    with TestRun.step("Read IO stats after stopping cache."):
        core_disk_writes_increase = (
            check_device_write_stats(core_part) - core_disk_writes_before_stop
        )

    with TestRun.step("Validate IO stats after stopping cache."):
        if core_disk_writes_increase == 0:
            TestRun.LOGGER.error("Writes should occur on the core device after stopping cache.")
        if core_disk_writes_increase != exp_obj_writes_increase:
            TestRun.LOGGER.error("Write statistics for the core device should be equal "
                                 "to those from the exported object.")

    with TestRun.step("Read data from the core device."):
        # Bug fix: this previously reused "/tmp/test_file_2", overwriting the
        # pre-stop snapshot; test_file_3 now gets its own distinct path.
        test_file_3 = File.create_file("/tmp/test_file_3")
        dd = Dd().output(test_file_3.full_path) \
            .input(core_part.system_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_3.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        # After the stop-flush the raw core content must equal the original data.
        if test_file_md5sum_main != test_file_3.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Delete test files."):
        test_file_main.remove(True)
        test_file_1.remove(True)
        test_file_2.remove(True)
        test_file_3.remove(True)
Example #8
0
def test_clean_remove_core_with_fs(cache_mode, fs):
    """
        title: Test of the ability to remove core from cache in lazy-write modes with filesystem.
        description: |
          Test if OpenCAS removes core from cache in modes with lazy writes and with different
          filesystems without data loss.
        pass_criteria:
          - Core removing works properly.
          - Data on core device is correct after core is removed.
    """
    with TestRun.step("Prepare devices for cache and core."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(256, Unit.MebiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(512, Unit.MebiByte)])
        core_part = core_dev.partitions[0]
        Udev.disable()

    with TestRun.step(f"Start cache in {cache_mode} mode."):
        cache = casadm.start_cache(cache_part, cache_mode)

    with TestRun.step(f"Add core with {fs.name} filesystem to cache and mount it."):
        core_part.create_filesystem(fs)
        core = cache.add_core(core_part)
        core.mount(mnt_point)

    with TestRun.step("Disable cleaning and sequential cutoff."):
        # Keep written data dirty in cache so removing the core must flush it.
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Create test file and read its md5 sum."):
        test_file_main = create_random_test_file("/tmp/test_file_main", Size(64, Unit.MebiByte))
        test_file_md5sum_main = test_file_main.md5sum()

    with TestRun.step("Copy test file to the exported object."):
        # NOTE(review): path is mnt_point + "test_file_1" with no explicit separator —
        # assumes the module-level mnt_point ends with "/"; confirm.
        test_file_1 = File.create_file(mnt_point + "test_file_1")
        dd = Dd().output(test_file_1.full_path) \
            .input(test_file_main.full_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_1.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        if test_file_md5sum_main != test_file_1.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Unmount and remove core."):
        # Core removal is expected to flush the dirty data down to the core device.
        core.unmount()
        core.remove_core()

    with TestRun.step("Mount core device."):
        # Mount the raw core partition directly — CAS is out of the IO path now.
        core_part.mount(mnt_point)

    with TestRun.step("Read data from the core device."):
        test_file_2 = File.create_file("/tmp/test_file_2")
        dd = Dd().output(test_file_2.full_path) \
            .input(test_file_1.full_path) \
            .block_size(bs) \
            .count(int(test_file_1.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_2.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        # Data read back from the bare core must match the original file.
        if test_file_md5sum_main != test_file_2.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Delete test files."):
        test_file_main.remove(True)
        test_file_1.remove(True)
        test_file_2.remove(True)

    with TestRun.step("Unmount core device."):
        core_part.unmount()
        remove(mnt_point, True, True, True)
def test_stop_no_flush_load_cache(cache_mode, filesystem):
    """
        title: Test to check that 'stop --no-data-flush' command works correctly.
        description: |
          Negative test of the ability of CAS to load unflushed cache on core device
          with filesystem. Test uses lazy flush cache modes.
        pass_criteria:
          - No system crash while load cache.
          - Starting cache without loading metadata fails.
          - Starting cache with loading metadata finishes with success.
    """
    with TestRun.step("Prepare cache and core devices."):
        cache_part, core_part = prepare()

    with TestRun.step("Start cache."):
        cache = casadm.start_cache(cache_part, cache_mode, force=True)

    with TestRun.step("Change cleaning policy to NOP."):
        # NOP cleaning keeps data dirty so the stop below leaves unflushed blocks.
        cache.set_cleaning_policy(CleaningPolicy.nop)

    with TestRun.step(
            f"Add core with {filesystem.name} filesystem to cache and mount it."
    ):
        core_part.create_filesystem(filesystem)
        core = cache.add_core(core_part)
        core.mount(mount_point)

    with TestRun.step(
            f"Create test file in mount point of exported object and check its md5 sum."
    ):
        test_file = fs_utils.create_random_test_file(test_file_path,
                                                     Size(48, Unit.MebiByte))
        test_file_md5_before = test_file.md5sum()

    with TestRun.step("Unmount exported object."):
        core.unmount()

    with TestRun.step("Count dirty blocks on exported object."):
        # Snapshot taken before stopping; must survive the stop/load cycle intact.
        dirty_blocks_before = core.get_dirty_blocks()

    with TestRun.step("Stop cache with option '--no-data-flush'."):
        cache.stop(no_data_flush=True)
        caches_count = len(casadm_parser.get_caches())
        if caches_count != 0:
            TestRun.fail(
                f"Expected caches count: 0; Actual caches count: {caches_count}."
            )

    with TestRun.step("Try to start cache without loading metadata."):
        # Dirty metadata is left on the cache device, so a plain (non-load,
        # non-force) start must be rejected with the existing-metadata error.
        output = TestRun.executor.run_expect_fail(
            cli.start_cmd(cache_dev=str(cache_part.path),
                          cache_mode=str(cache_mode.name.lower()),
                          force=False,
                          load=False))
        cli_messages.check_stderr_msg(
            output, cli_messages.start_cache_with_existing_metadata)

    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache.cache_device)
        caches_count = len(casadm_parser.get_caches())
        if caches_count != 1:
            TestRun.fail(
                f"Expected caches count: 1 Actual caches count: {caches_count}."
            )
        cores_count = len(casadm_parser.get_cores(cache.cache_id))
        if cores_count != 1:
            TestRun.fail(
                f"Expected cores count: 1; Actual cores count: {cores_count}.")

    with TestRun.step(
            "Compare dirty blocks number before and after loading cache."):
        if dirty_blocks_before != core.get_dirty_blocks():
            TestRun.LOGGER.error(
                "Dirty blocks number is different than before loading cache.")

    with TestRun.step("Mount exported object."):
        core.mount(mount_point)

    with TestRun.step(
            "Compare md5 sum of test file before and after loading cache."):
        if test_file_md5_before != test_file.md5sum():
            TestRun.LOGGER.error(
                "Test file's md5 sum is different than before loading cache.")

    with TestRun.step("Unmount exported object."):
        core.unmount()

    with TestRun.step("Stop cache."):
        casadm.stop_all_caches()
Example #10
0
def test_trim_device_discard_support(trim_support_cache_core, cache_mode,
                                     filesystem, cleaning_policy):
    """
        title: Trim requests supported on various cache and core devices.
        description: |
          Handling trim requests support when various combination of SSD and HDD are used as
          cache and core.
        pass_criteria:
          - No system crash.
          - Discards detected on CAS.
          - Discards detected on SSD device when it is used as core.
          - Discards not detected on HDD device used as cache or core.
          - Discards not detected on cache device.
    """

    mount_point = "/mnt"

    # fix: removed extraneous f-prefixes from the two step descriptions below
    # (no placeholders).
    with TestRun.step("Create partitions on SSD and HDD devices."):
        TestRun.disks["ssd1"].create_partitions([Size(1, Unit.GibiByte)])
        TestRun.disks["ssd2"].create_partitions([Size(1, Unit.GibiByte)])
        TestRun.disks["hdd"].create_partitions([Size(1, Unit.GibiByte)])
        ssd1_dev = TestRun.disks["ssd1"].partitions[0]
        ssd2_dev = TestRun.disks["ssd2"].partitions[0]
        hdd_dev = TestRun.disks["hdd"].partitions[0]

    with TestRun.step("Start cache and add core."):
        # trim_support_cache_core = (cache_supports_trim, core_supports_trim);
        # pick SSD where trim is expected, HDD otherwise.
        cache_dev = ssd1_dev if trim_support_cache_core[0] else hdd_dev
        core_dev = ssd2_dev if trim_support_cache_core[1] else hdd_dev

        cache = casadm.start_cache(cache_dev, cache_mode, force=True)
        cache.set_cleaning_policy(cleaning_policy)
        core = cache.add_core(core_dev)

    with TestRun.step("Make filesystem and mount it with discard option."):
        core.create_filesystem(filesystem)
        core.mount(mount_point, ["discard"])

    with TestRun.step("Create random file."):
        # File spans ~90% of the core so the later discard is clearly measurable.
        test_file = fs_utils.create_random_test_file(
            os.path.join(mount_point, "test_file"), core_dev.size * 0.9)
        occupancy_before = core.get_occupancy()
        TestRun.LOGGER.info(str(core.get_statistics()))

    with TestRun.step("Start blktrace monitoring on all devices."):
        blktraces = start_monitoring(core_dev, cache_dev, core)

    with TestRun.step("Remove file."):
        os_utils.sync()
        os_utils.drop_caches()
        test_file.remove()

    with TestRun.step(
            "Ensure that discards were detected by blktrace on proper devices."
    ):
        # Discards must appear on the CAS exported object always, on the core
        # only when it supports trim, and never on the cache device.
        discard_expected = {
            "core": trim_support_cache_core[1],
            "cache": False,
            "cas": True
        }
        stop_monitoring_and_check_discards(blktraces, discard_expected)

    with TestRun.step("Ensure occupancy reduced."):
        occupancy_after = core.get_occupancy()
        TestRun.LOGGER.info(str(core.get_statistics()))

        if occupancy_after >= occupancy_before:
            TestRun.LOGGER.error(
                "Occupancy on core after removing test file greater than before."
            )
        else:
            TestRun.LOGGER.info(
                "Occupancy on core after removing test file smaller than before."
            )

    with TestRun.step("Check CAS sysfs properties values."):
        check_sysfs_properties(
            cache,
            cache_dev,
            core,
            core_dev.parent_device,
            core_supporting_discards=trim_support_cache_core[1])
def test_flush_signal_core(cache_mode):
    """
        title: Test for FLUSH and FUA signals sent to core device in modes with lazy writes.
        description: |
          Test if OpenCAS transmits FLUSH and FUA signals to core device in modes with lazy writes.
        pass_criteria:
          - FLUSH requests should be passed to core device.
          - FUA requests should be passed to core device.
    """
    with TestRun.step(
            "Set mark in syslog to not read entries existing before the test."
    ):
        # NOTE(review): uses the framework's private _read_syslog helper to advance
        # the read cursor; confirm there is no public API for this.
        Logs._read_syslog(Logs.last_read_line)

    with TestRun.step("Prepare devices for cache and core."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(2, Unit.GibiByte)])
        cache_part = cache_dev.partitions[0]
        # scsi_debug device logs incoming FLUSH/FUA requests, which the test greps for.
        core_dev = TestRun.scsi_debug_devices[0]

    with TestRun.step(
            "Start cache and add SCSI device with xfs filesystem as core."):
        cache = casadm.start_cache(cache_part, cache_mode)
        core_dev.create_filesystem(Filesystem.xfs)
        core = cache.add_core(core_dev)

    with TestRun.step("Mount exported object."):
        if core.is_mounted():
            core.unmount()
        core.mount(mount_point)

    with TestRun.step("Turn off cleaning policy."):
        # NOP keeps data dirty, so only explicit/automatic flushes reach the core.
        cache.set_cleaning_policy(CleaningPolicy.nop)

    with TestRun.step("Create temporary file on exported object."):
        tmp_file = create_random_test_file(f"{mount_point}/tmp.file",
                                           Size(1, Unit.GibiByte))
        os_utils.sync()

    with TestRun.step("Flush cache."):
        cache.flush_cache()
        os_utils.sync()

    with TestRun.step(
            f"Check {syslog_path} for flush request and delete temporary file."
    ):
        Logs.check_syslog_for_signals()
        tmp_file.remove(True)

    with TestRun.step("Create temporary file on exported object."):
        tmp_file = create_random_test_file(f"{mount_point}/tmp.file",
                                           Size(1, Unit.GibiByte))
        os_utils.sync()

    with TestRun.step("Flush core."):
        core.flush_core()
        os_utils.sync()

    with TestRun.step(
            f"Check {syslog_path} for flush request and delete temporary file."
    ):
        Logs.check_syslog_for_signals()
        tmp_file.remove(True)

    with TestRun.step("Turn on alru cleaning policy and set policy params."):
        cache.set_cleaning_policy(CleaningPolicy.alru)
        # Aggressive alru parameters so the automatic flush happens within the test.
        cache.set_params_alru(
            FlushParametersAlru(Time(milliseconds=5000), 10000,
                                Time(seconds=10), Time(seconds=10)))

    with TestRun.step("Create big temporary file on exported object."):
        tmp_file = create_random_test_file(f"{mount_point}/tmp.file",
                                           Size(5, Unit.GibiByte))
        os_utils.sync()

    with TestRun.step(
            "Wait for automatic flush from alru cleaning policy and check log."
    ):
        # Worst-case wait: staleness + activity threshold + wake-up time,
        # plus a 5 s safety margin.
        wait_time = (int(
            cache.get_flush_parameters_alru().staleness_time.total_seconds()) +
                     int(cache.get_flush_parameters_alru().activity_threshold.
                         total_seconds()) +
                     int(cache.get_flush_parameters_alru().wake_up_time.
                         total_seconds()) + 5)
        sleep(wait_time)

    with TestRun.step(
            f"Check {syslog_path} for flush request and delete temporary file."
    ):
        Logs.check_syslog_for_signals()
        tmp_file.remove(True)

    with TestRun.step("Create temporary file on exported object."):
        create_random_test_file(f"{mount_point}/tmp.file",
                                Size(1, Unit.GibiByte))
        os_utils.sync()

    with TestRun.step("Unmount exported object and remove it from cache."):
        # Removing the core triggers a flush of remaining dirty data.
        core.unmount()
        core.remove_core()
        os_utils.sync()

    with TestRun.step(f"Check {syslog_path} for flush request."):
        Logs.check_syslog_for_signals()

    with TestRun.step("Stop cache."):
        cache.stop()
def test_cas_preserves_partitions(partition_table, filesystem, cache_mode):
    """
        title: Volume test for preserving partition table from core device.
        description: |
          Validation of the ability of CAS to preserve partition table on core device
          after adding it to cache.
        pass_criteria:
          - Md5 sums on partitions shall be identical before and after running cache.
          - Partition table shall be preserved on exported object.
    """
    # fix: removed extraneous f-prefixes from step descriptions; renamed the
    # partition variable so the disk handle is no longer shadowed.
    with TestRun.step("Prepare cache and core devices."):
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]
        core_dev = TestRun.disks['core']
        core_sizes = [Size(1, Unit.GibiByte)] * cores_number
        core_dev.create_partitions(core_sizes, partition_table)

    with TestRun.step("Create filesystem on core devices."):
        for i in range(cores_number):
            core_dev.partitions[i].create_filesystem(filesystem)

    with TestRun.step("Mount core devices and create test files."):
        files = []
        for i, core in enumerate(core_dev.partitions):
            mount_path = f"{mount_point}{i}"
            core.mount(mount_path)
            test_file_path = f"{mount_path}/test_file"
            files.append(fs_utils.create_random_test_file(test_file_path))

    with TestRun.step("Check md5 sums of test files."):
        test_files_md5sums = [file.md5sum() for file in files]

    with TestRun.step("Unmount core devices."):
        for core in core_dev.partitions:
            core.unmount()

    with TestRun.step("Start cache."):
        cache = casadm.start_cache(cache_dev, cache_mode, force=True)

    with TestRun.step("Add cores to cache."):
        cores = [cache.add_core(core_dev.partitions[i]) for i in range(cores_number)]

    with TestRun.step("Mount core devices."):
        # Mount the exported objects — their partition tables must mirror the
        # ones created on the raw core partitions.
        for i, core in enumerate(cores):
            core.mount(f"{mount_point}{i}")

    with TestRun.step("Check again md5 sums of test files."):
        test_files_md5sums_new = [file.md5sum() for file in files]

    with TestRun.step("Unmount core devices."):
        for core in cores:
            core.unmount()

    with TestRun.step("Stop cache."):
        cache.stop()

    with TestRun.step("Compare md5 sums of test files."):
        if test_files_md5sums != test_files_md5sums_new:
            TestRun.fail("Md5 sums are different.")
def test_partition_create_cas(partition_table, filesystem, cache_mode):
    """
        title: Test for preserving partition table created on exported volume after stopping cache.
        description: |
          Validation of the ability of CAS to preserve partition table created on exported volume
          after stopping cache.
        pass_criteria:
          - Md5 sums on partitions shall be identical before and after stopping cache.
          - Partition table shall be preserved on core device.
    """
    # fix: removed extraneous f-prefixes from step descriptions; removed the dead
    # `counter` variable (incremented but never read); renamed loop variables so
    # the Core object is no longer shadowed.
    with TestRun.step("Prepare cache and core devices."):
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(256, Unit.MebiByte)])
        cache_dev = cache_disk.partitions[0]
        core_dev = TestRun.disks['core']

    with TestRun.step("Start cache."):
        cache = casadm.start_cache(cache_dev, cache_mode, force=True)
        core = cache.add_core(core_dev)

    with TestRun.step("Create partitions on exported device."):
        core_sizes = [Size(1, Unit.GibiByte)] * cores_number
        # The exported object has no block size of its own here; inherit it from
        # the backing core device so partitioning uses correct alignment.
        core.block_size = core_dev.block_size
        disk_utils.create_partitions(core, core_sizes, partition_table)

    with TestRun.step("Create filesystem on core devices."):
        for part in core.partitions:
            part.create_filesystem(filesystem)

    with TestRun.step("Mount core devices and create test files."):
        files = []
        for i, part in enumerate(core.partitions):
            mount_path = f"{mount_point}{i}"
            part.mount(mount_path)
            test_file_path = f"{mount_path}/test_file"
            files.append(fs_utils.create_random_test_file(test_file_path))

    with TestRun.step("Check md5 sums of test files."):
        test_files_md5sums = [file.md5sum() for file in files]

    with TestRun.step("Unmount core devices."):
        for part in core.partitions:
            part.unmount()

    with TestRun.step("Stop cache."):
        cache.stop()

    with TestRun.step("Read partitions on core device."):
        # Re-register each partition created on the exported object as a
        # partition of the raw core device so it can be mounted after the stop.
        for part in core.partitions:
            part.parent_device = core_dev
            new_part = Partition(part.parent_device, part.type, part.number)
            core_dev.partitions.append(new_part)

    with TestRun.step("Mount core devices."):
        for i, part in enumerate(core_dev.partitions):
            part.mount(f"{mount_point}{i}")

    with TestRun.step("Check again md5 sums of test files."):
        test_files_md5sums_new = [file.md5sum() for file in files]

    with TestRun.step("Unmount core devices."):
        for part in core_dev.partitions:
            part.unmount()

    with TestRun.step("Compare md5 sums of test files."):
        if test_files_md5sums != test_files_md5sums_new:
            TestRun.fail("Md5 sums are different.")
def test_trim_eviction(cache_mode, cache_line_size, filesystem, cleaning):
    """
        title: Test verifying if trim requests do not cause eviction on CAS device.
        description: |
          When trim requests enabled and files are being added and removed from CAS device,
          there is no eviction (no reads from cache).
        pass_criteria:
          - Reads from cache device are the same before and after removing test file.
    """
    mount_point = "/mnt"
    test_file_path = os.path.join(mount_point, "test_file")

    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]

        core_disk = TestRun.disks['core']
        core_disk.create_partitions([Size(1, Unit.GibiByte)])
        core_dev = core_disk.partitions[0]

        # Needed later to convert iostat sector counts into bytes for the
        # comparison against io-class block statistics.
        cache_block_size = disk_utils.get_block_size(cache_disk)

    with TestRun.step("Start cache on device supporting trim and add core."):
        cache = casadm.start_cache(cache_dev,
                                   cache_mode,
                                   cache_line_size,
                                   force=True)
        cache.set_cleaning_policy(cleaning)
        # Disable udev so background device scanning does not generate
        # additional cache traffic that would skew the read counters.
        Udev.disable()
        core = cache.add_core(core_dev)

    with TestRun.step("Create filesystem on CAS device and mount it."):
        core.create_filesystem(filesystem)
        # The "discard" mount option makes the filesystem issue trim
        # requests when files are removed - the behavior under test.
        core.mount(mount_point, ["discard"])

    with TestRun.step("Create ioclass config."):
        # Io class 1 classifies metadata with no allocation ("0.00") so
        # metadata reads can be separated from data reads (io class 0)
        # in the pass/fail check at the end of the test.
        ioclass_config.create_ioclass_config()
        ioclass_config.add_ioclass(ioclass_id=1,
                                   eviction_priority=1,
                                   allocation="0.00",
                                   rule="metadata")
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config.default_config_file_path)

    with TestRun.step("Create random file using ddrescue."):
        test_file = fs_utils.create_random_test_file(test_file_path,
                                                     core_dev.size * 0.9)
        create_file_with_ddrescue(core_dev, test_file)
        os_utils.sync()
        os_utils.drop_caches()

    with TestRun.step("Remove file and create a new one."):
        # Snapshot read statistics before the remove/recreate cycle.
        cache_iostats_before = cache_dev.get_io_stats()
        data_reads_before = cache.get_io_class_statistics(
            io_class_id=0).block_stats.cache.reads
        metadata_reads_before = cache.get_io_class_statistics(
            io_class_id=1).block_stats.cache.reads
        test_file.remove()
        os_utils.sync()
        os_utils.drop_caches()
        create_file_with_ddrescue(core_dev, test_file)

    with TestRun.step(
            "Check using iostat that reads from cache did not occur."):
        cache_iostats_after = cache_dev.get_io_stats()
        data_reads_after = cache.get_io_class_statistics(
            io_class_id=0).block_stats.cache.reads
        metadata_reads_after = cache.get_io_class_statistics(
            io_class_id=1).block_stats.cache.reads
        reads_before = cache_iostats_before.sectors_read
        reads_after = cache_iostats_after.sectors_read

        metadata_reads_diff = metadata_reads_after - metadata_reads_before
        data_reads_diff = data_reads_after - data_reads_before
        # Convert the sector-count delta to bytes so it is comparable with
        # the io-class block statistics.
        # NOTE(review): assumes iostat sectors are cache_block_size units;
        # the kernel block layer reports 512-byte sectors - confirm against
        # disk_utils.get_block_size semantics.
        iostat_diff = (reads_after - reads_before) * cache_block_size

        # Cache reads observed by iostat may only be explained by metadata
        # handling; any data-class read from cache means eviction occurred.
        if iostat_diff > int(metadata_reads_diff) or int(data_reads_diff) > 0:
            TestRun.fail(
                f"Number of reads from cache before and after removing test file "
                f"differs. Sectors read before: {reads_before}, sectors read after: {reads_after}. "
                f"Data read from cache before {data_reads_before}, after {data_reads_after}. "
                f"Metadata read from cache before {metadata_reads_before}, "
                f"after {metadata_reads_after}.")
        else:
            TestRun.LOGGER.info(
                "Number of reads from cache before and after removing test file is the same."
            )