def post_setup(self):
        print("VDBench plugin post setup")
        if not self.reinstall and fs_utils.check_if_directory_exists(self.working_dir):
            return

        if fs_utils.check_if_directory_exists(self.working_dir):
            fs_utils.remove(self.working_dir, True, True)

        fs_utils.create_directory(self.working_dir)
        TestRun.LOGGER.info("Copying vdbench to working dir.")
        fs_utils.copy(os.path.join(self.source_dir, "*"), self.working_dir,
                      True, True)
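
# post_setup() relies on plugin state prepared elsewhere; a minimal sketch of
# the attributes it assumes (names taken from the method body, meanings are
# illustrative assumptions):
#
#   self.source_dir  - directory holding the vdbench files to copy
#   self.working_dir - target directory on the tested system
#   self.reinstall   - when False, an existing working_dir is reused as-is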
def test_raid_as_cache(cache_mode):
    """
        title: Test if SW RAID1 can be a cache device.
        description: |
          Test if SW RAID1 can be a cache for a CAS device.
        pass_criteria:
          - Successful creation of RAID and building CAS device with it.
          - Files copied successfully and their md5 sums match the originals.
    """
    with TestRun.step("Create RAID1."):
        raid_disk = TestRun.disks['cache1']
        raid_disk.create_partitions([Size(2, Unit.GibiByte)])
        raid_disk_1 = raid_disk.partitions[0]
        raid_disk2 = TestRun.disks['cache2']
        raid_disk2.create_partitions([Size(2, Unit.GibiByte)])
        raid_disk_2 = raid_disk2.partitions[0]

        config = RaidConfiguration(
            level=Level.Raid1,
            metadata=MetadataVariant.Legacy,
            number_of_devices=2)

        raid_volume = Raid.create(config, [raid_disk_1, raid_disk_2])
        TestRun.LOGGER.info(f"RAID created successfully.")

    with TestRun.step("Prepare core device."):
        core_disk = TestRun.disks['core']
        core_disk.create_partitions([Size(2, Unit.GibiByte)])
        core_dev = core_disk.partitions[0]

    with TestRun.step("Create CAS device with RAID1 as cache."):
        cache = casadm.start_cache(raid_volume, cache_mode, force=True)
        core = cache.add_core(core_dev)

        core.create_filesystem(Filesystem.ext3)
        core.mount(mount_point)

    with TestRun.step("Copy files to cache and check md5sum."):
        for _ in range(files_number):
            test_file = fs_utils.create_random_test_file(test_file_tmp_path, test_file_size)
            test_file_copied = test_file.copy(test_file_path, force=True)

            if test_file.md5sum() != test_file_copied.md5sum():
                TestRun.LOGGER.error("Checksums are different.")

            fs_utils.remove(test_file.full_path, True)
            fs_utils.remove(test_file_copied.full_path, True)

        TestRun.LOGGER.info(f"Successful verification.")
def test_create_example_files():
    """
        title: Example test manipulating files on a filesystem.
        description: Perform various operations on a filesystem.
        pass_criteria:
          - System does not crash.
          - All operations complete successfully.
          - Data consistency is preserved.
    """
    with TestRun.step("Create file with content"):
        file1 = File.create_file("example_file")
        file1.write("Test file\ncontent line\ncontent")
    with TestRun.step("Read file content"):
        content_before_change = file1.read()
        TestRun.LOGGER.info(f"File content: {content_before_change}")
    with TestRun.step("Replace single line in file"):
        fs_utils.replace_in_lines(file1, 'content line', 'replaced line')
    with TestRun.step("Read file content and check if it changed"):
        content_after_change = file1.read()
        if content_before_change == content_after_change:
            TestRun.fail("Content didn't changed as expected")

    with TestRun.step("Make copy of the file and check if md5 sum matches"):
        file2 = file1.copy('/tmp', force=True)
        if file1.md5sum() != file2.md5sum():
            TestRun.fail("md5 sum doesn't match!")
    with TestRun.step("Change permissions of second file"):
        file2.chmod_numerical(123)
    with TestRun.step("Remove second file"):
        fs_utils.remove(file2.full_path, True)

    with TestRun.step("List contents of home directory"):
        dir1 = Directory("~")
        dir_content = dir1.ls()
    with TestRun.step("Change permissions of file"):
        file1.chmod(fs_utils.Permissions['r'] | fs_utils.Permissions['w'],
                    fs_utils.PermissionsUsers(7))
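        # Assuming Permissions is a flag enum and PermissionsUsers(7) selects
        # user, group and others, the call above would be roughly equivalent
        # to `chmod ugo+rw example_file` (an assumption, not verified against
        # the framework).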
    with TestRun.step("Log home directory content"):
        for item in dir_content:
            TestRun.LOGGER.info(f"Item {str(item)} - {type(item).__name__}")
    with TestRun.step("Remove file"):
        fs_utils.remove(file1.full_path, True)
Example #4
def test_create_example_files(prepare_and_cleanup):
    prepare()
    TestProperties.LOGGER.info("Test run")
    file1 = File.create_file("example_file")
    file1.write("Test file\ncontent line\ncontent")
    content_before_change = file1.read()
    TestProperties.LOGGER.info(f"File content: {content_before_change}")
    fs_utils.replace_in_lines(file1, 'content line', 'replaced line')

    content_after_change = file1.read()
    assert content_before_change != content_after_change

    file2 = file1.copy('/tmp', force=True)
    assert file1.md5sum() == file2.md5sum()

    file2.chmod_numerical(123)
    fs_utils.remove(file2.full_path, True)
    dir1 = Directory("~")
    dir_content = dir1.ls()
    file1.chmod(fs_utils.Permissions['r'] | fs_utils.Permissions['w'],
                fs_utils.PermissionsUsers(7))
    for item in dir_content:
        TestProperties.LOGGER.info(f"Item {str(item)} - {type(item).__name__}")
    fs_utils.remove(file1.full_path, True)
Example #5
def test_clean_remove_core_without_fs(cache_mode):
    """
        title: Test of the ability to remove core from cache in lazy-write modes without filesystem.
        description: |
          Test if OpenCAS removes a core without a filesystem in lazy-write modes
          without data loss.
        pass_criteria:
          - Core removing works properly.
          - Writes to the exported object and the core device during OpenCAS's work are equal.
          - Data on core device is correct after core is removed.
    """
    with TestRun.step("Prepare devices for cache and core."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(256, Unit.MebiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(512, Unit.MebiByte)])
        core_part = core_dev.partitions[0]
        Udev.disable()

    with TestRun.step(f"Start cache in {cache_mode} mode."):
        cache = casadm.start_cache(cache_part, cache_mode)

    with TestRun.step("Add core to cache."):
        core = cache.add_core(core_part)

    with TestRun.step("Disable cleaning and sequential cutoff."):
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Read IO stats before test"):
        core_disk_writes_initial = check_device_write_stats(core_part)
        exp_obj_writes_initial = check_device_write_stats(core)

    with TestRun.step("Write data to exported object."):
        test_file_main = create_random_test_file("/tmp/test_file_main", Size(64, Unit.MebiByte))
        dd = Dd().output(core.system_path) \
            .input(test_file_main.full_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_md5sum_main = test_file_main.md5sum()

    with TestRun.step("Read IO stats after write to the exported object."):
        core_disk_writes_increase = (
            check_device_write_stats(core_part) - core_disk_writes_initial
        )
        exp_obj_writes_increase = (
            check_device_write_stats(core) - exp_obj_writes_initial
        )

    with TestRun.step("Validate IO stats after write to the exported object."):
        if core_disk_writes_increase > 0:
            TestRun.LOGGER.error("Writes should occur only on the exported object.")
        if exp_obj_writes_increase != test_file_main.size.value:
            TestRun.LOGGER.error("Not all writes reached the exported object.")

    with TestRun.step("Read data from the exported object."):
        test_file_1 = File.create_file("/tmp/test_file_1")
        dd = Dd().output(test_file_1.full_path) \
            .input(core.system_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_1.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        if test_file_md5sum_main != test_file_1.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Read data from the core device."):
        test_file_2 = File.create_file("/tmp/test_file_2")
        dd = Dd().output(test_file_2.full_path) \
            .input(core_part.system_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_2.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        if test_file_md5sum_main == test_file_2.md5sum():
            TestRun.LOGGER.error("Md5 sums should be different.")

    with TestRun.step("Read IO stats before removing core."):
        core_disk_writes_before_remove = check_device_write_stats(core_part)

    with TestRun.step("Remove core."):
        core.remove_core()

    with TestRun.step("Read IO stats after removing core."):
        core_disk_writes_increase = (
            check_device_write_stats(core_part) - core_disk_writes_before_remove
        )

    with TestRun.step("Validate IO stats after removing core."):
        if core_disk_writes_increase == 0:
            TestRun.LOGGER.error("Writes should occur on the core device after removing core.")
        if core_disk_writes_increase != exp_obj_writes_increase:
            TestRun.LOGGER.error("Write statistics for the core device should be equal "
                                 "to those from the exported object.")

    with TestRun.step("Read data from core device again."):
        test_file_3 = File.create_file("/tmp/test_file_3")
        dd = Dd().output(test_file_3.full_path) \
            .input(core_part.system_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_3.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        if test_file_md5sum_main != test_file_3.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Delete test files."):
        test_file_main.remove(True)
        test_file_1.remove(True)
        test_file_2.remove(True)
        test_file_3.remove(True)
        remove(mnt_point, True, True, True)
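
# check_device_write_stats() is used above but defined outside this snippet;
# a minimal sketch of one plausible implementation (an assumption, not the
# framework's code) that returns bytes written, so the comparison against
# test_file_main.size.value holds:
def check_device_write_stats(device):
    dev_name = device.system_path.split('/')[-1]
    # /sys/class/block/<name>/stat: field 7 is "sectors written"
    stat_line = TestRun.executor.run(f"cat /sys/class/block/{dev_name}/stat").stdout
    return int(stat_line.split()[6]) * 512  # 512-byte sectors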
Example #6
def test_clean_remove_core_with_fs(cache_mode, fs):
    """
        title: Test of the ability to remove core from cache in lazy-write modes with filesystem.
        description: |
          Test if OpenCAS removes core from cache in modes with lazy writes and with different
          filesystems without data loss.
        pass_criteria:
          - Core removing works properly.
          - Data on core device is correct after core is removed.
    """
    with TestRun.step("Prepare devices for cache and core."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(256, Unit.MebiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(512, Unit.MebiByte)])
        core_part = core_dev.partitions[0]
        Udev.disable()

    with TestRun.step(f"Start cache in {cache_mode} mode."):
        cache = casadm.start_cache(cache_part, cache_mode)

    with TestRun.step(f"Add core with {fs.name} filesystem to cache and mount it."):
        core_part.create_filesystem(fs)
        core = cache.add_core(core_part)
        core.mount(mnt_point)

    with TestRun.step("Disable cleaning and sequential cutoff."):
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Create test file and read its md5 sum."):
        test_file_main = create_random_test_file("/tmp/test_file_main", Size(64, Unit.MebiByte))
        test_file_md5sum_main = test_file_main.md5sum()

    with TestRun.step("Copy test file to the exported object."):
        test_file_1 = File.create_file(mnt_point + "test_file_1")
        dd = Dd().output(test_file_1.full_path) \
            .input(test_file_main.full_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_1.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        if test_file_md5sum_main != test_file_1.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Unmount and remove core."):
        core.unmount()
        core.remove_core()

    with TestRun.step("Mount core device."):
        core_part.mount(mnt_point)

    with TestRun.step("Read data from the core device."):
        test_file_2 = File.create_file("/tmp/test_file_2")
        dd = Dd().output(test_file_2.full_path) \
            .input(test_file_1.full_path) \
            .block_size(bs) \
            .count(int(test_file_1.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_2.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        if test_file_md5sum_main != test_file_2.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Delete test files."):
        test_file_main.remove(True)
        test_file_1.remove(True)
        test_file_2.remove(True)

    with TestRun.step("Unmount core device."):
        core_part.unmount()
        remove(mnt_point, True, True, True)
def test_ioclass_metadata(filesystem):
    """
        title: Metadata IO classification.
        description: |
          Determine if every file operation that causes a metadata update results in increased
          writes to cached metadata.
        pass_criteria:
          - No kernel bug.
          - Metadata is classified properly.
    """
    # Exact values may not be tested as each file system has different metadata structure.
    test_dir_path = f"{mountpoint}/test_dir"

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Prepare and load IO class config file."):
        ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
        # metadata IO class
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule="metadata&done",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
                      f"at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step("Create 20 test files."):
        requests_to_metadata_before = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        files = []
        for i in range(1, 21):
            file_path = f"{mountpoint}/test_file_{i}"
            dd = (
                Dd().input("/dev/urandom")
                    .output(file_path)
                    .count(random.randint(5, 50))
                    .block_size(Size(1, Unit.MebiByte))
                    .oflag("sync")
            )
            dd.run()
            files.append(File(file_path))

    with TestRun.step("Check requests to metadata."):
        requests_to_metadata_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        if requests_to_metadata_after == requests_to_metadata_before:
            TestRun.fail("No requests to metadata while creating files!")

    with TestRun.step("Rename all test files."):
        requests_to_metadata_before = requests_to_metadata_after
        for file in files:
            file.move(f"{file.full_path}_renamed")
        sync()

    with TestRun.step("Check requests to metadata."):
        requests_to_metadata_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        if requests_to_metadata_after == requests_to_metadata_before:
            TestRun.fail("No requests to metadata while renaming files!")

    with TestRun.step(f"Create directory {test_dir_path}."):
        requests_to_metadata_before = requests_to_metadata_after
        fs_utils.create_directory(path=test_dir_path)

        TestRun.LOGGER.info(f"Moving test files into {test_dir_path}")
        for file in files:
            file.move(test_dir_path)
        sync()

    with TestRun.step("Check requests to metadata."):
        requests_to_metadata_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        if requests_to_metadata_after == requests_to_metadata_before:
            TestRun.fail("No requests to metadata while moving files!")

    with TestRun.step(f"Remove {test_dir_path}."):
        fs_utils.remove(path=test_dir_path, force=True, recursive=True)

    with TestRun.step("Check requests to metadata."):
        requests_to_metadata_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).request_stats.write
        if requests_to_metadata_after == requests_to_metadata_before:
            TestRun.fail("No requests to metadata while deleting directory with files!")
Example #8
def test_ioclass_export_configuration(cache_mode):
    """
    title: Export IO class configuration to a file
    description: |
        Test CAS ability to create a properly formatted file with the current IO class configuration.
    pass_criteria:
     - CAS default IO class configuration contains unclassified class only
     - CAS properly imports previously exported configuration
    """
    with TestRun.LOGGER.step(f"Test prepare"):
        cache, core = prepare(cache_mode)
        saved_config_path = "/tmp/opencas_saved.conf"
        default_list = [IoClass.default()]

    with TestRun.LOGGER.step(
            "Check IO class configuration (should contain only default class)"):
        csv = casadm.list_io_classes(cache.cache_id, OutputFormat.csv).stdout
        if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv),
                                             default_list):
            TestRun.LOGGER.error(
                "Default configuration does not match expected\n"
                f"Current:\n{csv}\n"
                f"Expected:{IoClass.list_to_csv(default_list)}")

    with TestRun.LOGGER.step(
            "Create and load configuration file for 33 IO classes "
            "with random names, allocation and priority values"):
        random_list = IoClass.generate_random_ioclass_list(33)
        IoClass.save_list_to_config_file(
            random_list, ioclass_config_path=ioclass_config_path)
        casadm.load_io_classes(cache.cache_id, ioclass_config_path)

    with TestRun.LOGGER.step(
            "Display and export IO class configuration - displayed configuration "
            "should be the same as created"):
        TestRun.executor.run(
            f"{casadm.list_io_classes_cmd(str(cache.cache_id), OutputFormat.csv.name)}"
            f" > {saved_config_path}")
        csv = fs_utils.read_file(saved_config_path)
        if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv),
                                             random_list):
            TestRun.LOGGER.error(
                "Exported configuration does not match expected\n"
                f"Current:\n{csv}\n"
                f"Expected:{IoClass.list_to_csv(random_list)}")

    with TestRun.LOGGER.step("Stop Intel CAS"):
        casadm.stop_cache(cache.cache_id)

    with TestRun.LOGGER.step("Start cache and add core"):
        cache = casadm.start_cache(cache.cache_device, force=True)
        casadm.add_core(cache, core.core_device)

    with TestRun.LOGGER.step(
            "Check IO class configuration (should contain only default class)"
    ):
        csv = casadm.list_io_classes(cache.cache_id, OutputFormat.csv).stdout
        if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv),
                                             default_list):
            TestRun.LOGGER.error(
                "Default configuration does not match expected\n"
                f"Current:\n{csv}\n"
                f"Expected:{IoClass.list_to_csv(default_list)}")

    with TestRun.LOGGER.step(
            "Load exported configuration file for 33 IO classes"):
        casadm.load_io_classes(cache.cache_id, saved_config_path)

    with TestRun.LOGGER.step(
            "Display IO class configuration - should be the same as created"):
        csv = casadm.list_io_classes(cache.cache_id, OutputFormat.csv).stdout
        if not IoClass.compare_ioclass_lists(IoClass.csv_to_list(csv),
                                             random_list):
            TestRun.LOGGER.error(
                "Exported configuration does not match expected\n"
                f"Current:\n{csv}\n"
                f"Expected:{IoClass.list_to_csv(random_list)}")

    with TestRun.LOGGER.step(f"Test cleanup"):
        fs_utils.remove(saved_config_path)
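
# The export step in this test is simply casadm's CSV listing redirected to a
# file; as a shell one-liner (cache id 1 as an example) it would be roughly:
#
#   casadm --io-class --list --cache-id 1 --output-format csv > /tmp/opencas_saved.conf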
def remove(self, force: bool = False, ignore_errors: bool = False):
    fs_utils.remove(str(self), force=force, ignore_errors=ignore_errors)
def test_ioclass_metadata(filesystem):
    """
    Perform operations on files that cause metadata update.
    Determine if every such operation results in increased writes to cached metadata.
    Exact values may not be tested as each file system has different metadata structure.
    """
    cache, core = prepare()
    Udev.disable()

    ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
    # metadata IO class
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule="metadata&done",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(filesystem)
    core.mount(mountpoint)
    sync()

    requests_to_metadata_before = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["write total"]
    TestRun.LOGGER.info("Creating 20 test files")
    files = []
    for i in range(1, 21):
        file_path = f"{mountpoint}/test_file_{i}"
        dd = (
            Dd().input("/dev/urandom")
                .output(file_path)
                .count(random.randint(5, 50))
                .block_size(Size(1, Unit.MebiByte))
                .oflag("sync")
        )
        dd.run()
        files.append(File(file_path))

    TestRun.LOGGER.info("Checking requests to metadata")
    requests_to_metadata_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["write total"]
    if requests_to_metadata_after == requests_to_metadata_before:
        pytest.xfail("No requests to metadata while creating files!")

    requests_to_metadata_before = requests_to_metadata_after
    TestRun.LOGGER.info("Renaming all test files")
    for file in files:
        file.move(f"{file.full_path}_renamed")
    sync()

    TestRun.LOGGER.info("Checking requests to metadata")
    requests_to_metadata_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["write total"]
    if requests_to_metadata_after == requests_to_metadata_before:
        pytest.xfail("No requests to metadata while renaming files!")

    requests_to_metadata_before = requests_to_metadata_after
    test_dir_path = f"{mountpoint}/test_dir"
    TestRun.LOGGER.info(f"Creating directory {test_dir_path}")
    fs_utils.create_directory(path=test_dir_path)

    TestRun.LOGGER.info(f"Moving test files into {test_dir_path}")
    for file in files:
        file.move(test_dir_path)
    sync()

    TestRun.LOGGER.info("Checking requests to metadata")
    requests_to_metadata_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["write total"]
    if requests_to_metadata_after == requests_to_metadata_before:
        pytest.xfail("No requests to metadata while moving files!")

    TestRun.LOGGER.info(f"Removing {test_dir_path}")
    fs_utils.remove(path=test_dir_path, force=True, recursive=True)

    TestRun.LOGGER.info("Checking requests to metadata")
    requests_to_metadata_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["write total"]
    if requests_to_metadata_after == requests_to_metadata_before:
        pytest.xfail(
            "No requests to metadata while deleting directory with files!")
Example #11
def test_fs_operations():
    TestRun.LOGGER.info("Testing file system events during tracing")
    iotrace = TestRun.plugins['iotrace']

    for disk in TestRun.dut.disks:
        try:
            with TestRun.step("Create file system"):
                disk.create_filesystem(Filesystem.ext4)
            with TestRun.step("Mount device"):
                disk.mount(mountpoint)
            with TestRun.step("Start tracing"):
                iotrace.start_tracing([disk.system_path])
                time.sleep(5)
            with TestRun.step("Create test directory and file"):
                write_file(f"{mountpoint}/test_file", content="foo")
                sync()
                test_file_inode = get_inode(f"{mountpoint}/test_file")
                create_directory(f"{mountpoint}/test_dir")
                sync()
            with TestRun.step("Write to test file"):
                write_file(f"{mountpoint}/test_file",
                           overwrite=False,
                           content="bar")
                sync()
            with TestRun.step("Create new test file"):
                create_file(f"{mountpoint}/test_file2")
                test_file2_inode = get_inode(f"{mountpoint}/test_file2")
                sync()
            with TestRun.step("Move test file"):
                move(f"{mountpoint}/test_file", f"{mountpoint}/test_dir")
                sync()
            with TestRun.step("Delete test file"):
                remove(f"{mountpoint}/test_dir/test_file")
                sync()
            with TestRun.step("Stop tracing"):
                sync()
                iotrace.stop_tracing()
            with TestRun.step("Verify trace correctness"):
                trace_path = iotrace.get_latest_trace_path()
                events = iotrace.get_trace_events(trace_path)
                events_parsed = iotrace.parse_json(events)
                # Check that each expected file event appears in the trace.
                def has_event(event_type, inode):
                    return any(
                        'file' in event
                        and event['file']['eventType'] == event_type
                        and event['file']['id'] == inode
                        for event in events_parsed)

                expected_events = [('Create', test_file2_inode),
                                   ('Delete', test_file_inode),
                                   ('MoveTo', test_file_inode),
                                   ('MoveFrom', test_file_inode),
                                   ('Access', test_file_inode)]
                for event_type, inode in expected_events:
                    if not has_event(event_type, inode):
                        raise Exception(f"Could not find {event_type} event")
        finally:
            with TestRun.step("Unmount device"):
                disk.unmount()
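
# The verification step above implies that each parsed trace event is a dict
# of roughly the following shape (a sketch inferred from the checks, not the
# complete iotrace schema):
#
#   {
#       "file": {
#           "eventType": "Create",  # Create / Delete / MoveTo / MoveFrom / Access
#           "id": 1234567,          # inode number of the affected file
#       },
#   }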
def test_ioclass_directory_dir_operations(filesystem):
    """
    Test if directory classification works properly after directory operations like move or rename.
    The operations themselves should not cause reclassification but IO after those operations
    should be reclassified to proper IO class.
    Directory classification may work with a delay after loading IO class configuration or
    after move/rename operations. The test checks that the maximum delay is not exceeded.
    """
    def create_files_with_classification_delay_check(directory: Directory,
                                                     ioclass_id: int):
        start_time = datetime.now()
        occupancy_after = cache.get_statistics_deprecated(
            io_class_id=ioclass_id)["occupancy"]
        dd_blocks = 10
        dd_size = Size(dd_blocks, Unit.Blocks4096)
        file_counter = 0
        unclassified_files = []
        time_from_start = datetime.now() - start_time
        while time_from_start < ioclass_config.MAX_CLASSIFICATION_DELAY:
            occupancy_before = occupancy_after
            file_path = f"{directory.full_path}/test_file_{file_counter}"
            file_counter += 1
            time_from_start = datetime.now() - start_time
            (Dd().input("/dev/zero")
                 .output(file_path)
                 .oflag("sync")
                 .block_size(Size(1, Unit.Blocks4096))
                 .count(dd_blocks)
                 .run())
            occupancy_after = cache.get_statistics_deprecated(
                io_class_id=ioclass_id)["occupancy"]
            if occupancy_after - occupancy_before < dd_size:
                unclassified_files.append(file_path)

        if len(unclassified_files) == file_counter:
            pytest.xfail(
                "No files were properly classified within max delay time!")

        if len(unclassified_files):
            TestRun.LOGGER.info("Rewriting unclassified test files...")
            for file_path in unclassified_files:
                (Dd().input("/dev/zero").output(
                    file_path).oflag("sync").block_size(
                        Size(1, Unit.Blocks4096)).count(dd_blocks).run())

    def read_files_with_reclassification_check(target_ioclass_id: int,
                                               source_ioclass_id: int,
                                               directory: Directory,
                                               with_delay: bool):
        start_time = datetime.now()
        target_occupancy_after = cache.get_statistics_deprecated(
            io_class_id=target_ioclass_id)["occupancy"]
        source_occupancy_after = cache.get_statistics_deprecated(
            io_class_id=source_ioclass_id)["occupancy"]
        unclassified_files = []

        for file in [
                item for item in directory.ls() if isinstance(item, File)
        ]:
            target_occupancy_before = target_occupancy_after
            source_occupancy_before = source_occupancy_after
            time_from_start = datetime.now() - start_time
            (Dd().input(file.full_path).output("/dev/null").block_size(
                Size(1, Unit.Blocks4096)).run())
            target_occupancy_after = cache.get_statistics_deprecated(
                io_class_id=target_ioclass_id)["occupancy"]
            source_occupancy_after = cache.get_statistics_deprecated(
                io_class_id=source_ioclass_id)["occupancy"]
            if target_occupancy_after < target_occupancy_before:
                pytest.xfail("Target IO class occupancy lowered!")
            elif target_occupancy_after - target_occupancy_before < file.size:
                unclassified_files.append(file)
                if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
                    continue
                pytest.xfail("Target IO class occupancy not changed properly!")
            if source_occupancy_after >= source_occupancy_before:
                if file not in unclassified_files:
                    unclassified_files.append(file)
                if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
                    continue
                pytest.xfail("Source IO class occupancy not changed properly!")

        if len(unclassified_files):
            TestRun.LOGGER.info("Rereading unclassified test files...")
            sync()
            drop_caches(DropCachesMode.ALL)
            for file in unclassified_files:
                (Dd().input(file.full_path).output("/dev/null").block_size(
                    Size(1, Unit.Blocks4096)).run())

    cache, core = prepare()
    Udev.disable()

    proper_ids = random.sample(range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2)
    ioclass_id_1 = proper_ids[0]
    classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}"
    ioclass_id_2 = proper_ids[1]
    classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}"
    # directory IO classes
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id_1,
        eviction_priority=1,
        allocation=True,
        rule=f"directory:{classified_dir_path_1}",
        ioclass_config_path=ioclass_config_path,
    )
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id_2,
        eviction_priority=1,
        allocation=True,
        rule=f"directory:{classified_dir_path_2}",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(fs_type=filesystem)
    core.mount(mount_point=mountpoint)
    sync()

    non_classified_dir_path = f"{mountpoint}/non_classified"
    TestRun.LOGGER.info(
        f"Creating a non-classified directory: {non_classified_dir_path}")
    dir_1 = Directory.create_directory(path=non_classified_dir_path)

    TestRun.LOGGER.info(
        f"Renaming {non_classified_dir_path} to {classified_dir_path_1}")
    dir_1.move(destination=classified_dir_path_1)

    TestRun.LOGGER.info("Creating files with delay check")
    create_files_with_classification_delay_check(directory=dir_1,
                                                 ioclass_id=ioclass_id_1)

    TestRun.LOGGER.info(f"Creating {classified_dir_path_2}/subdir")
    dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir",
                                       parents=True)

    TestRun.LOGGER.info("Creating files with delay check")
    create_files_with_classification_delay_check(directory=dir_2,
                                                 ioclass_id=ioclass_id_2)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {classified_dir_path_1}")
    dir_2.move(destination=classified_dir_path_1)

    TestRun.LOGGER.info("Reading files with reclassification check")
    read_files_with_reclassification_check(target_ioclass_id=ioclass_id_1,
                                           source_ioclass_id=ioclass_id_2,
                                           directory=dir_2,
                                           with_delay=False)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {mountpoint}")
    dir_2.move(destination=mountpoint)

    TestRun.LOGGER.info("Reading files with reclassification check")
    read_files_with_reclassification_check(target_ioclass_id=0,
                                           source_ioclass_id=ioclass_id_1,
                                           directory=dir_2,
                                           with_delay=False)

    TestRun.LOGGER.info(f"Removing {classified_dir_path_2}")
    fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info(
        f"Renaming {classified_dir_path_1} to {classified_dir_path_2}")
    dir_1.move(destination=classified_dir_path_2)

    TestRun.LOGGER.info("Reading files with reclassification check")
    read_files_with_reclassification_check(target_ioclass_id=ioclass_id_2,
                                           source_ioclass_id=ioclass_id_1,
                                           directory=dir_1,
                                           with_delay=True)

    TestRun.LOGGER.info(
        f"Renaming {classified_dir_path_2} to {non_classified_dir_path}")
    dir_1.move(destination=non_classified_dir_path)

    TestRun.LOGGER.info("Reading files with reclassification check")
    read_files_with_reclassification_check(target_ioclass_id=0,
                                           source_ioclass_id=ioclass_id_2,
                                           directory=dir_1,
                                           with_delay=True)
def test_ioclass_directory_dir_operations(filesystem):
    """
        title: Test IO classification by directory operations.
        description: |
          Test if directory classification works properly after directory operations like move or
          rename.
        pass_criteria:
          - No kernel bug.
          - The operations themselves should not cause reclassification, but IO issued after
            those operations should be reclassified to the proper IO class.
          - Directory classification may work with a delay after loading IO class configuration
            or after move/rename operations. The test checks that the maximum delay is not
            exceeded.
    """

    non_classified_dir_path = f"{mountpoint}/non_classified"

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        proper_ids = random.sample(
            range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2)
        ioclass_id_1 = proper_ids[0]
        classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}"
        ioclass_id_2 = proper_ids[1]
        classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}"
        # directory IO classes
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id_1,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{classified_dir_path_1}",
            ioclass_config_path=ioclass_config_path,
        )
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id_2,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{classified_dir_path_2}",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem "
                      f"and mount {core.system_path} at {mountpoint}."):
        core.create_filesystem(fs_type=filesystem)
        core.mount(mount_point=mountpoint)
        sync()

    with TestRun.step(
            f"Create a non-classified directory: {non_classified_dir_path}."):
        dir_1 = Directory.create_directory(path=non_classified_dir_path)

    with TestRun.step(
            f"Rename {non_classified_dir_path} to {classified_dir_path_1}."):
        dir_1.move(destination=classified_dir_path_1)

    with TestRun.step("Create files with delay check."):
        create_files_with_classification_delay_check(cache,
                                                     directory=dir_1,
                                                     ioclass_id=ioclass_id_1)

    with TestRun.step(f"Create {classified_dir_path_2}/subdir."):
        dir_2 = Directory.create_directory(
            path=f"{classified_dir_path_2}/subdir", parents=True)

    with TestRun.step("Create files with delay check."):
        create_files_with_classification_delay_check(cache,
                                                     directory=dir_2,
                                                     ioclass_id=ioclass_id_2)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step(f"Move {dir_2.full_path} to {classified_dir_path_1}."):
        dir_2.move(destination=classified_dir_path_1)

    with TestRun.step("Read files with reclassification check."):
        read_files_with_reclassification_check(cache,
                                               target_ioclass_id=ioclass_id_1,
                                               source_ioclass_id=ioclass_id_2,
                                               directory=dir_2,
                                               with_delay=False)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step(f"Move {dir_2.full_path} to {mountpoint}."):
        dir_2.move(destination=mountpoint)

    with TestRun.step("Read files with reclassification check."):
        read_files_with_reclassification_check(cache,
                                               target_ioclass_id=0,
                                               source_ioclass_id=ioclass_id_1,
                                               directory=dir_2,
                                               with_delay=False)

    with TestRun.step(f"Remove {classified_dir_path_2}."):
        fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step(
            f"Rename {classified_dir_path_1} to {classified_dir_path_2}."):
        dir_1.move(destination=classified_dir_path_2)

    with TestRun.step("Read files with reclassification check."):
        read_files_with_reclassification_check(cache,
                                               target_ioclass_id=ioclass_id_2,
                                               source_ioclass_id=ioclass_id_1,
                                               directory=dir_1,
                                               with_delay=True)

    with TestRun.step(
            f"Rename {classified_dir_path_2} to {non_classified_dir_path}."):
        dir_1.move(destination=non_classified_dir_path)

    with TestRun.step("Read files with reclassification check."):
        read_files_with_reclassification_check(cache,
                                               target_ioclass_id=0,
                                               source_ioclass_id=ioclass_id_2,
                                               directory=dir_1,
                                               with_delay=True)
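
# In this step-based variant, the helpers are module-level functions rather
# than the closures shown in the earlier example and take the cache object
# explicitly; the signatures below are assumptions inferred from the call
# sites:
#
#   def create_files_with_classification_delay_check(cache, directory: Directory,
#                                                    ioclass_id: int): ...
#
#   def read_files_with_reclassification_check(cache, target_ioclass_id: int,
#                                              source_ioclass_id: int,
#                                              directory: Directory,
#                                              with_delay: bool): ...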
Example #14
def test_flush_over_640_gibibytes_with_fs(cache_mode, fs):
    """
        title: Test of the ability to flush a huge amount of dirty data on a device with filesystem.
        description: |
          Flush the cache when the amount of dirty data on a core with a filesystem exceeds 640 GiB.
        pass_criteria:
          - Flushing completes successfully without any errors.
    """
    with TestRun.step("Prepare devices for cache and core."):
        cache_dev = TestRun.disks['cache']
        check_disk_size(cache_dev)
        cache_dev.create_partitions([required_disk_size])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        check_disk_size(core_dev)
        Udev.disable()

    with TestRun.step(f"Start cache in {cache_mode} mode."):
        cache = casadm.start_cache(cache_part, cache_mode)

    with TestRun.step(
            f"Add core with {fs.name} filesystem to cache and mount it."):
        core_dev.create_filesystem(fs)
        core = cache.add_core(core_dev)
        core.mount(mnt_point)

    with TestRun.step("Disable cleaning and sequential cutoff."):
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Create test file"):
        test_file_main = File.create_file("/tmp/test_file_main")
        fio = (Fio().create_command()
                    .io_engine(IoEngine.libaio)
                    .read_write(ReadWrite.write)
                    .block_size(bs)
                    .direct()
                    .io_depth(256)
                    .target(test_file_main.full_path)
                    .size(file_size))
        # timeout for non-time-based fio
        fio.default_run_time = timedelta(hours=4)
        fio.run()
        test_file_main.refresh_item()

    with TestRun.step("Validate test file and read its md5 sum."):
        if test_file_main.size != file_size:
            TestRun.fail("Created test file hasn't reached its target size.")
        test_file_md5sum_main = test_file_main.md5sum()

    with TestRun.step("Write data to exported object."):
        test_file_copy = test_file_main.copy(mnt_point + "test_file_copy")
        test_file_copy.refresh_item()
        sync()

    with TestRun.step(f"Check if dirty data exceeded {file_size * 0.98} GiB."):
        minimum_4KiB_blocks = int(
            (file_size * 0.98).get_value(Unit.Blocks4096))
        if int(cache.get_statistics().usage_stats.dirty) < minimum_4KiB_blocks:
            TestRun.fail("There is not enough dirty data in the cache!")

    with TestRun.step("Unmount core and stop cache with flush."):
        core.unmount()
        # this operation could take a few hours, depending on the core disk
        output = TestRun.executor.run(stop_cmd(str(cache.cache_id)),
                                      timedelta(hours=12))
        if output.exit_code != 0:
            TestRun.fail(f"Stopping cache with flush failed!\n{output.stderr}")

    with TestRun.step(
            "Mount core device and check md5 sum of test file copy."):
        core_dev.mount(mnt_point)
        if test_file_md5sum_main != test_file_copy.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Delete test files."):
        test_file_main.remove(True)
        test_file_copy.remove(True)

    with TestRun.step("Unmount core device."):
        core_dev.unmount()
        remove(mnt_point, True, True, True)
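
# This test relies on module-level sizing helpers defined outside the snippet;
# plausible definitions (values are assumptions consistent with the 640 GiB
# threshold in the docstring):
bs = Size(64, Unit.KibiByte)
mnt_point = "/mnt/cas/"
file_size = Size(640, Unit.GibiByte)
required_disk_size = file_size * 1.05  # headroom above the flush target

def check_disk_size(device):
    if device.size < required_disk_size:
        TestRun.fail(f"Not enough space on {device.path} to run this test.")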