Example No. 1
    def start_monitoring(self,
                         buffer_size: Size = None,
                         number_of_subbuffers: int = None):
        if self.blktrace_pid != -1:
            raise Exception(
                f"blktrace already running with PID: {self.blktrace_pid}")

        self.__outputDirectoryPath = Directory.create_temp_directory(
        ).full_path

        drop_caches(DropCachesMode.ALL)

        number_of_subbuffers = ("" if number_of_subbuffers is None else
                                f" --num-sub-buffers={number_of_subbuffers}")
        buffer_size = (
            "" if buffer_size is None else
            f" --buffer-size={buffer_size.get_value(Unit.KibiByte)}")
        command = (
            f"blktrace{number_of_subbuffers}{buffer_size} --dev={self.device.path}"
            f"{self.masks} --output={PREFIX} --output-dir={self.__outputDirectoryPath}"
        )
        echo_output = TestRun.executor.run_expect_success(
            f"nohup {command} </dev/null &>{self.__outputDirectoryPath}/out & echo $!"
        )
        self.blktrace_pid = int(echo_output.stdout)
        TestRun.LOGGER.info(
            f"blktrace monitoring for device {self.device.path} started"
            f" (PID: {self.blktrace_pid}, output dir: {self.__outputDirectoryPath}"
        )
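The `nohup {command} </dev/null &>... & echo $!` idiom detaches blktrace from the session while still reporting its PID back to the framework. A minimal local sketch of the same pattern, using only the standard library (the command and paths are hypothetical):

import shlex
import subprocess

def start_detached(command: str, log_path: str) -> int:
    """Start `command` detached from the session and return its PID
    (a local equivalent of `nohup <command> </dev/null &>log & echo $!`)."""
    with open(log_path, "ab") as log:
        proc = subprocess.Popen(
            shlex.split(command),
            stdin=subprocess.DEVNULL,
            stdout=log,
            stderr=subprocess.STDOUT,
            start_new_session=True,  # survive the parent's session, like nohup
        )
    return proc.pid

# Usage (hypothetical device and paths):
# pid = start_detached("blktrace --dev=/dev/sdb --output=trace --output-dir=/tmp/bt",
#                      "/tmp/bt/out")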
Example No. 2
def read_files_with_reclassification_check(cache, target_ioclass_id: int,
                                           source_ioclass_id: int,
                                           directory: Directory,
                                           with_delay: bool):
    start_time = datetime.now()
    target_occupancy_after = cache.get_io_class_statistics(
        io_class_id=target_ioclass_id).usage_stats.occupancy
    source_occupancy_after = cache.get_io_class_statistics(
        io_class_id=source_ioclass_id).usage_stats.occupancy
    files_to_reclassify = []
    target_ioclass_is_enabled = ioclass_is_enabled(cache, target_ioclass_id)

    for file in [item for item in directory.ls() if isinstance(item, File)]:
        target_occupancy_before = target_occupancy_after
        source_occupancy_before = source_occupancy_after
        time_from_start = datetime.now() - start_time
        dd = Dd().input(file.full_path).output("/dev/null").block_size(
            Size(1, Unit.Blocks4096))
        dd.run()
        target_occupancy_after = cache.get_io_class_statistics(
            io_class_id=target_ioclass_id).usage_stats.occupancy
        source_occupancy_after = cache.get_io_class_statistics(
            io_class_id=source_ioclass_id).usage_stats.occupancy

        if target_ioclass_is_enabled:
            if target_occupancy_after < target_occupancy_before:
                TestRun.LOGGER.error("Target IO class occupancy lowered!")
            elif target_occupancy_after - target_occupancy_before < file.size:
                files_to_reclassify.append(file)
                if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
                    continue
                TestRun.LOGGER.error(
                    "Target IO class occupancy not changed properly!\n"
                    f"Expected: {file.size + target_occupancy_before}\n"
                    f"Actual: {target_occupancy_after}")
        elif target_occupancy_after > target_occupancy_before and with_delay:
            files_to_reclassify.append(file)

        if source_occupancy_after >= source_occupancy_before:
            if file not in files_to_reclassify:
                files_to_reclassify.append(file)
            if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
                continue
            TestRun.LOGGER.error(
                "Source IO class occupancy not changed properly!\n"
                f"Before: {source_occupancy_before}\n"
                f"After: {source_occupancy_after}")

    if len(files_to_reclassify):
        TestRun.LOGGER.info("Rereading unclassified test files...")
        sync()
        drop_caches(DropCachesMode.ALL)
        for file in files_to_reclassify:
            (Dd().input(file.full_path).output("/dev/null").block_size(
                Size(1, Unit.Blocks4096)).run())
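Note how a too-small occupancy delta only becomes an error once ioclass_config.MAX_CLASSIFICATION_DELAY has elapsed; inside that window the file is merely queued for rereading. The same grace-period gating, distilled into a standalone sketch (the helper name and delay value are illustrative, not framework API):

from datetime import datetime, timedelta

MAX_CLASSIFICATION_DELAY = timedelta(seconds=6)  # assumed value for illustration

def occupancy_grew_enough(before, after, expected_delta, start_time, retry_queue, item):
    """Treat a too-small occupancy delta as an error only after the grace period."""
    if after - before >= expected_delta:
        return True
    retry_queue.append(item)  # candidates are reread later, after sync/drop_caches
    if datetime.now() - start_time <= MAX_CLASSIFICATION_DELAY:
        return True  # still inside the classification delay window; not an error yet
    return False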
Example No. 3
def test_create_example_files():
    """
        title: Example test manipulating the filesystem.
        description: Perform various operations on a filesystem.
        pass_criteria:
          - System does not crash.
          - All operations complete successfully.
          - Data consistency is preserved.
    """
    with TestRun.step("Create file with content"):
        file1 = File.create_file("example_file")
        file1.write("Test file\ncontent line\ncontent")
    with TestRun.step("Read file content"):
        content_before_change = file1.read()
        TestRun.LOGGER.info(f"File content: {content_before_change}")
    with TestRun.step("Replace single line in file"):
        fs_utils.replace_in_lines(file1, 'content line', 'replaced line')
    with TestRun.step("Read file content and check if it changed"):
        content_after_change = file1.read()
        if content_before_change == content_after_change:
            TestRun.fail("Content didn't changed as expected")

    with TestRun.step("Make copy of the file and check if md5 sum matches"):
        file2 = file1.copy('/tmp', force=True)
        if file1.md5sum() != file2.md5sum():
            TestRun.fail("md5 sum doesn't match!")
    with TestRun.step("Change permissions of second file"):
        file2.chmod_numerical(123)
    with TestRun.step("Remove second file"):
        fs_utils.remove(file2.full_path, True)

    with TestRun.step("List contents of home directory"):
        dir1 = Directory("~")
        dir_content = dir1.ls()
    with TestRun.step("Change permissions of file"):
        file1.chmod(fs_utils.Permissions['r'] | fs_utils.Permissions['w'],
                    fs_utils.PermissionsUsers(7))
    with TestRun.step("Log home directory content"):
        for item in dir_content:
            TestRun.LOGGER.info(f"Item {str(item)} - {type(item).__name__}")
    with TestRun.step("Remove file"):
        fs_utils.remove(file1.full_path, True)
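TestRun.step above acts as a context manager that names and logs each test phase. A minimal sketch of such a helper (an assumption for illustration, not the framework's actual implementation):

import logging
from contextlib import contextmanager

logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger("test")

@contextmanager
def step(description: str):
    """Log a test phase and report which step a failure occurred in."""
    LOGGER.info(f"STEP: {description}")
    try:
        yield
    except Exception:
        LOGGER.error(f"Step failed: {description}")
        raise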
Example No. 4
def parse_ls_output(ls_output, dir_path=''):
    split_output = ls_output.split('\n')
    fs_items = []
    for line in split_output:
        if not line.strip():
            continue
        line_fields = line.split()
        if len(line_fields) < 8:
            continue
        file_type = line[0]
        if file_type not in ['-', 'd', 'l', 'b', 'c', 'p', 's']:
            continue
        permissions = line_fields[0][1:].replace('.', '')
        owner = line_fields[2]
        group = line_fields[3]
        size = Size(float(line_fields[4]), Unit.Byte)
        split_date = line_fields[5].split('-')
        split_time = line_fields[6].split(':')
        modification_time = datetime(int(split_date[0]), int(split_date[1]),
                                     int(split_date[2]), int(split_time[0]),
                                     int(split_time[1]), int(split_time[2]))
        if dir_path and file_type != 'l':
            full_path = '/'.join([dir_path, line_fields[7]])
        else:
            full_path = line_fields[7]

        from test_utils.filesystem.file import File, FsItem
        from test_utils.filesystem.directory import Directory
        from test_utils.filesystem.symlink import Symlink

        if file_type == '-':
            fs_item = File(full_path)
        elif file_type == 'd':
            fs_item = Directory(full_path)
        elif file_type == 'l':
            target_path = TestProperties.executor.execute(
                f"readlink -f {full_path}").stdout
            fs_item = Symlink(full_path, target_path)
        else:
            fs_item = FsItem(full_path)

        fs_item.permissions.user = Permissions['|'.join(list(permissions[:3].replace('-', '')))]\
            if permissions[:3] != '---' else Permissions(0)
        fs_item.permissions.group = Permissions['|'.join(list(permissions[3:6].replace('-', '')))]\
            if permissions[3:6] != '---' else Permissions(0)
        fs_item.permissions.other = Permissions['|'.join(list(permissions[6:].replace('-', '')))]\
            if permissions[6:] != '---' else Permissions(0)

        fs_item.owner = owner
        fs_item.group = group
        fs_item.size = size
        fs_item.modification_time = modification_time
        fs_items.append(fs_item)
    return fs_items
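parse_ls_output expects long-listing lines with at least eight whitespace-separated fields, where field 5 is a plain byte count and fields 6-7 hold a numeric date and time, as produced by something like ls -la --time-style="+%Y-%m-%d %H:%M:%S" (the exact ls invocation is an assumption; it lives elsewhere in the framework). A usage sketch, run in the same module as the parser:

# fields: perms, links, owner, group, size, date, time, name
sample = "-rw-r--r--. 1 root root 1024 2021-03-01 12:30:45 example_file"
items = parse_ls_output(sample, dir_path="/tmp")
# items[0] is a File with full_path "/tmp/example_file", size 1024 B,
# owner/group "root" and permissions parsed from "rw-r--r--"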
Example No. 5
def test_create_example_files(prepare_and_cleanup):
    prepare()
    TestProperties.LOGGER.info("Test run")
    file1 = File.create_file("example_file")
    file1.write("Test file\ncontent line\ncontent")
    content_before_change = file1.read()
    TestProperties.LOGGER.info(f"File content: {content_before_change}")
    fs_utils.replace_in_lines(file1, 'content line', 'replaced line')

    content_after_change = file1.read()
    assert content_before_change != content_after_change

    file2 = file1.copy('/tmp', force=True)
    assert file1.md5sum() == file2.md5sum()

    file2.chmod_numerical(123)
    fs_utils.remove(file2.full_path, True)
    dir1 = Directory("~")
    dir_content = dir1.ls()
    file1.chmod(fs_utils.Permissions['r'] | fs_utils.Permissions['w'],
                fs_utils.PermissionsUsers(7))
    for item in dir_content:
        TestProperties.LOGGER.info(f"Item {str(item)} - {type(item).__name__}")
    fs_utils.remove(file1.full_path, True)
Example No. 6
    def read_files_with_reclassification_check(target_ioclass_id: int,
                                               source_ioclass_id: int,
                                               directory: Directory,
                                               with_delay: bool):
        start_time = datetime.now()
        target_occupancy_after = cache.get_statistics_deprecated(
            io_class_id=target_ioclass_id)["occupancy"]
        source_occupancy_after = cache.get_statistics_deprecated(
            io_class_id=source_ioclass_id)["occupancy"]
        unclassified_files = []

        for file in [
                item for item in directory.ls() if isinstance(item, File)
        ]:
            target_occupancy_before = target_occupancy_after
            source_occupancy_before = source_occupancy_after
            time_from_start = datetime.now() - start_time
            (Dd().input(file.full_path).output("/dev/null").block_size(
                Size(1, Unit.Blocks4096)).run())
            target_occupancy_after = cache.get_statistics_deprecated(
                io_class_id=target_ioclass_id)["occupancy"]
            source_occupancy_after = cache.get_statistics_deprecated(
                io_class_id=source_ioclass_id)["occupancy"]
            if target_occupancy_after < target_occupancy_before:
                pytest.xfail("Target IO class occupancy lowered!")
            elif target_occupancy_after - target_occupancy_before < file.size:
                unclassified_files.append(file)
                if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
                    continue
                pytest.xfail("Target IO class occupancy not changed properly!")
            if source_occupancy_after >= source_occupancy_before:
                if file not in unclassified_files:
                    unclassified_files.append(file)
                if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
                    continue
                pytest.xfail("Source IO class occupancy not changed properly!")

        if len(unclassified_files):
            TestRun.LOGGER.info("Rereading unclassified test files...")
            sync()
            drop_caches(DropCachesMode.ALL)
            for file in unclassified_files:
                (Dd().input(file.full_path).output("/dev/null").block_size(
                    Size(1, Unit.Blocks4096)).run())
Example No. 7
def test_ioclass_directory_file_operations(filesystem):
    """
    Test if directory classification works properly after file operations like move or rename.
    The operations themselves should not cause reclassification but IO after those operations
    should be reclassified to the proper IO class.
    """
    def check_occupancy(expected: Size, actual: Size):
        if expected != actual:
            pytest.xfail("Occupancy check failed!\n"
                         f"Expected: {expected}, actual: {actual}")

    cache, core = prepare()
    Udev.disable()
    test_dir_path = f"{mountpoint}/test_dir"
    nested_dir_path = f"{test_dir_path}/nested_dir"

    dd_blocks = random.randint(5, 50)

    ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
    # directory IO class
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=f"directory:{test_dir_path}",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(fs_type=filesystem)
    core.mount(mount_point=mountpoint)
    sync()

    TestRun.LOGGER.info(f"Creating directory {nested_dir_path}")
    Directory.create_directory(path=nested_dir_path, parents=True)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info("Creating test file")
    classified_before = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    file_path = f"{test_dir_path}/test_file"
    (Dd().input("/dev/urandom").output(file_path).oflag("sync").block_size(
        Size(1, Unit.MebiByte)).count(dd_blocks).run())
    sync()
    drop_caches(DropCachesMode.ALL)
    test_file = File(file_path).refresh_item()

    TestRun.LOGGER.info("Checking classified occupancy")
    classified_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    check_occupancy(classified_before + test_file.size, classified_after)

    TestRun.LOGGER.info("Moving test file out of classified directory")
    classified_before = classified_after
    non_classified_before = cache.get_statistics_deprecated(
        io_class_id=0)["occupancy"]
    test_file.move(destination=mountpoint)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info("Checking classified occupancy")
    classified_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    check_occupancy(classified_before, classified_after)
    TestRun.LOGGER.info("Checking non-classified occupancy")
    non_classified_after = cache.get_statistics_deprecated(
        io_class_id=0)["occupancy"]
    check_occupancy(non_classified_before, non_classified_after)

    TestRun.LOGGER.info("Reading test file")
    classified_before = classified_after
    non_classified_before = non_classified_after
    (Dd().input(test_file.full_path).output("/dev/null").block_size(
        Size(1, Unit.MebiByte)).run())

    TestRun.LOGGER.info("Checking classified occupancy")
    classified_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    check_occupancy(classified_before - test_file.size, classified_after)
    TestRun.LOGGER.info("Checking non-classified occupancy")
    non_classified_after = cache.get_statistics_deprecated(
        io_class_id=0)["occupancy"]
    check_occupancy(non_classified_before + test_file.size,
                    non_classified_after)

    TestRun.LOGGER.info(f"Moving test file to {nested_dir_path}")
    classified_before = classified_after
    non_classified_before = non_classified_after
    test_file.move(destination=nested_dir_path)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info("Checking classified occupancy")
    classified_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    check_occupancy(classified_before, classified_after)
    TestRun.LOGGER.info("Checking non-classified occupancy")
    non_classified_after = cache.get_statistics_deprecated(
        io_class_id=0)["occupancy"]
    check_occupancy(non_classified_before, non_classified_after)

    TestRun.LOGGER.info("Reading test file")
    classified_before = classified_after
    non_classified_before = non_classified_after
    (Dd().input(test_file.full_path).output("/dev/null").block_size(
        Size(1, Unit.MebiByte)).run())

    TestRun.LOGGER.info("Checking classified occupancy")
    classified_after = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    check_occupancy(classified_before + test_file.size, classified_after)
    TestRun.LOGGER.info("Checking non-classified occupancy")
    non_classified_after = cache.get_statistics_deprecated(
        io_class_id=0)["occupancy"]
    check_occupancy(non_classified_before - test_file.size,
                    non_classified_after)
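Every post-move read in this test is expected to shift exactly test_file.size of occupancy from one IO class to the other, while the move itself (followed by sync and drop_caches) should leave both sides unchanged. That bookkeeping, stated as a standalone sketch:

def assert_occupancy_shift(src_before, src_after, dst_before, dst_after, moved):
    """After rereading a moved file, occupancy must migrate by exactly its size."""
    assert dst_after == dst_before + moved, "target class did not grow by the file size"
    assert src_after == src_before - moved, "source class did not shrink by the file size"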
Example No. 8
def test_ioclass_directory_dir_operations(filesystem):
    """
    Test if directory classification works properly after directory operations like move or rename.
    The operations themselves should not cause reclassification but IO after those operations
    should be reclassified to the proper IO class.
    Directory classification may work with a delay after loading IO class configuration or
    move/rename operations. The test checks that the maximum delay is not exceeded.
    """
    def create_files_with_classification_delay_check(directory: Directory,
                                                     ioclass_id: int):
        start_time = datetime.now()
        occupancy_after = cache.get_statistics_deprecated(
            io_class_id=ioclass_id)["occupancy"]
        dd_blocks = 10
        dd_size = Size(dd_blocks, Unit.Blocks4096)
        file_counter = 0
        unclassified_files = []
        time_from_start = datetime.now() - start_time
        while time_from_start < ioclass_config.MAX_CLASSIFICATION_DELAY:
            occupancy_before = occupancy_after
            file_path = f"{directory.full_path}/test_file_{file_counter}"
            file_counter += 1
            time_from_start = datetime.now() - start_time
            (Dd().input(
                "/dev/zero").output(file_path).oflag("sync").block_size(
                    Size(1, Unit.Blocks4096)).count(dd_blocks).run())
            occupancy_after = cache.get_statistics_deprecated(
                io_class_id=ioclass_id)["occupancy"]
            if occupancy_after - occupancy_before < dd_size:
                unclassified_files.append(file_path)

        if len(unclassified_files) == file_counter:
            pytest.xfail(
                "No files were properly classified within max delay time!")

        if len(unclassified_files):
            TestRun.LOGGER.info("Rewriting unclassified test files...")
            for file_path in unclassified_files:
                (Dd().input("/dev/zero").output(
                    file_path).oflag("sync").block_size(
                        Size(1, Unit.Blocks4096)).count(dd_blocks).run())

    def read_files_with_reclassification_check(target_ioclass_id: int,
                                               source_ioclass_id: int,
                                               directory: Directory,
                                               with_delay: bool):
        start_time = datetime.now()
        target_occupancy_after = cache.get_statistics_deprecated(
            io_class_id=target_ioclass_id)["occupancy"]
        source_occupancy_after = cache.get_statistics_deprecated(
            io_class_id=source_ioclass_id)["occupancy"]
        unclassified_files = []

        for file in [
                item for item in directory.ls() if isinstance(item, File)
        ]:
            target_occupancy_before = target_occupancy_after
            source_occupancy_before = source_occupancy_after
            time_from_start = datetime.now() - start_time
            (Dd().input(file.full_path).output("/dev/null").block_size(
                Size(1, Unit.Blocks4096)).run())
            target_occupancy_after = cache.get_statistics_deprecated(
                io_class_id=target_ioclass_id)["occupancy"]
            source_occupancy_after = cache.get_statistics_deprecated(
                io_class_id=source_ioclass_id)["occupancy"]
            if target_occupancy_after < target_occupancy_before:
                pytest.xfail("Target IO class occupancy lowered!")
            elif target_occupancy_after - target_occupancy_before < file.size:
                unclassified_files.append(file)
                if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
                    continue
                pytest.xfail("Target IO class occupancy not changed properly!")
            if source_occupancy_after >= source_occupancy_before:
                if file not in unclassified_files:
                    unclassified_files.append(file)
                if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
                    continue
                pytest.xfail("Source IO class occupancy not changed properly!")

        if len(unclassified_files):
            TestRun.LOGGER.info("Rereading unclassified test files...")
            sync()
            drop_caches(DropCachesMode.ALL)
            for file in unclassified_files:
                (Dd().input(file.full_path).output("/dev/null").block_size(
                    Size(1, Unit.Blocks4096)).run())

    cache, core = prepare()
    Udev.disable()

    proper_ids = random.sample(range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2)
    ioclass_id_1 = proper_ids[0]
    classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}"
    ioclass_id_2 = proper_ids[1]
    classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}"
    # directory IO classes
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id_1,
        eviction_priority=1,
        allocation=True,
        rule=f"directory:{classified_dir_path_1}",
        ioclass_config_path=ioclass_config_path,
    )
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id_2,
        eviction_priority=1,
        allocation=True,
        rule=f"directory:{classified_dir_path_2}",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(fs_type=filesystem)
    core.mount(mount_point=mountpoint)
    sync()

    non_classified_dir_path = f"{mountpoint}/non_classified"
    TestRun.LOGGER.info(
        f"Creating a non-classified directory: {non_classified_dir_path}")
    dir_1 = Directory.create_directory(path=non_classified_dir_path)

    TestRun.LOGGER.info(
        f"Renaming {non_classified_dir_path} to {classified_dir_path_1}")
    dir_1.move(destination=classified_dir_path_1)

    TestRun.LOGGER.info("Creating files with delay check")
    create_files_with_classification_delay_check(directory=dir_1,
                                                 ioclass_id=ioclass_id_1)

    TestRun.LOGGER.info(f"Creating {classified_dir_path_2}/subdir")
    dir_2 = Directory.create_directory(path=f"{classified_dir_path_2}/subdir",
                                       parents=True)

    TestRun.LOGGER.info("Creating files with delay check")
    create_files_with_classification_delay_check(directory=dir_2,
                                                 ioclass_id=ioclass_id_2)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {classified_dir_path_1}")
    dir_2.move(destination=classified_dir_path_1)

    TestRun.LOGGER.info("Reading files with reclassification check")
    read_files_with_reclassification_check(target_ioclass_id=ioclass_id_1,
                                           source_ioclass_id=ioclass_id_2,
                                           directory=dir_2,
                                           with_delay=False)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info(f"Moving {dir_2.full_path} to {mountpoint}")
    dir_2.move(destination=mountpoint)

    TestRun.LOGGER.info("Reading files with reclassification check")
    read_files_with_reclassification_check(target_ioclass_id=0,
                                           source_ioclass_id=ioclass_id_1,
                                           directory=dir_2,
                                           with_delay=False)

    TestRun.LOGGER.info(f"Removing {classified_dir_path_2}")
    fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
    sync()
    drop_caches(DropCachesMode.ALL)

    TestRun.LOGGER.info(
        f"Renaming {classified_dir_path_1} to {classified_dir_path_2}")
    dir_1.move(destination=classified_dir_path_2)

    TestRun.LOGGER.info("Reading files with reclassification check")
    read_files_with_reclassification_check(target_ioclass_id=ioclass_id_2,
                                           source_ioclass_id=ioclass_id_1,
                                           directory=dir_1,
                                           with_delay=True)

    TestRun.LOGGER.info(
        f"Renaming {classified_dir_path_2} to {non_classified_dir_path}")
    dir_1.move(destination=non_classified_dir_path)

    TestRun.LOGGER.info("Reading files with reclassification check")
    read_files_with_reclassification_check(target_ioclass_id=0,
                                           source_ioclass_id=ioclass_id_2,
                                           directory=dir_1,
                                           with_delay=True)
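create_files_with_classification_delay_check keeps writing new files until MAX_CLASSIFICATION_DELAY has passed and fails only if none of them were classified inside that window. Its control flow, distilled (the callables and the default deadline are illustrative):

from datetime import datetime, timedelta

def any_classified_within_delay(write_block, read_occupancy, expected_delta,
                                deadline=timedelta(seconds=6)):  # delay value assumed
    """Write files until the delay window closes; report whether any got classified."""
    start = datetime.now()
    created, unclassified = 0, 0
    while datetime.now() - start < deadline:
        before = read_occupancy()
        write_block(created)  # e.g. dd 10 x 4 KiB blocks to test_file_<created>
        created += 1
        if read_occupancy() - before < expected_delta:
            unclassified += 1
    return unclassified < created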
Example No. 9
def test_ioclass_directory_dir_operations(filesystem):
    """
        title: Test IO classification by directory operations.
        description: |
          Test if directory classification works properly after directory operations like move or
          rename.
        pass_criteria:
          - No kernel bug.
          - The operations themselves should not cause reclassification but IO after those
            operations should be reclassified to the proper IO class.
          - Directory classification may work with a delay after loading IO class configuration or
            move/rename operations. The test checks that the maximum delay is not exceeded.
    """

    non_classified_dir_path = f"{mountpoint}/non_classified"

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        proper_ids = random.sample(
            range(1, ioclass_config.MAX_IO_CLASS_ID + 1), 2)
        ioclass_id_1 = proper_ids[0]
        classified_dir_path_1 = f"{mountpoint}/dir_{ioclass_id_1}"
        ioclass_id_2 = proper_ids[1]
        classified_dir_path_2 = f"{mountpoint}/dir_{ioclass_id_2}"
        # directory IO classes
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id_1,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{classified_dir_path_1}",
            ioclass_config_path=ioclass_config_path,
        )
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id_2,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{classified_dir_path_2}",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem "
                      f"and mount {core.system_path} at {mountpoint}."):
        core.create_filesystem(fs_type=filesystem)
        core.mount(mount_point=mountpoint)
        sync()

    with TestRun.step(
            f"Create a non-classified directory: {non_classified_dir_path}."):
        dir_1 = Directory.create_directory(path=non_classified_dir_path)

    with TestRun.step(
            f"Rename {non_classified_dir_path} to {classified_dir_path_1}."):
        dir_1.move(destination=classified_dir_path_1)

    with TestRun.step("Create files with delay check."):
        create_files_with_classification_delay_check(cache,
                                                     directory=dir_1,
                                                     ioclass_id=ioclass_id_1)

    with TestRun.step(f"Create {classified_dir_path_2}/subdir."):
        dir_2 = Directory.create_directory(
            path=f"{classified_dir_path_2}/subdir", parents=True)

    with TestRun.step("Create files with delay check."):
        create_files_with_classification_delay_check(cache,
                                                     directory=dir_2,
                                                     ioclass_id=ioclass_id_2)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step(f"Move {dir_2.full_path} to {classified_dir_path_1}."):
        dir_2.move(destination=classified_dir_path_1)

    with TestRun.step("Read files with reclassification check."):
        read_files_with_reclassification_check(cache,
                                               target_ioclass_id=ioclass_id_1,
                                               source_ioclass_id=ioclass_id_2,
                                               directory=dir_2,
                                               with_delay=False)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step(f"Move {dir_2.full_path} to {mountpoint}."):
        dir_2.move(destination=mountpoint)

    with TestRun.step("Read files with reclassification check."):
        read_files_with_reclassification_check(cache,
                                               target_ioclass_id=0,
                                               source_ioclass_id=ioclass_id_1,
                                               directory=dir_2,
                                               with_delay=False)

    with TestRun.step(f"Remove {classified_dir_path_2}."):
        fs_utils.remove(path=classified_dir_path_2, force=True, recursive=True)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step(
            f"Rename {classified_dir_path_1} to {classified_dir_path_2}."):
        dir_1.move(destination=classified_dir_path_2)

    with TestRun.step("Read files with reclassification check."):
        read_files_with_reclassification_check(cache,
                                               target_ioclass_id=ioclass_id_2,
                                               source_ioclass_id=ioclass_id_1,
                                               directory=dir_1,
                                               with_delay=True)

    with TestRun.step(
            f"Rename {classified_dir_path_2} to {non_classified_dir_path}."):
        dir_1.move(destination=non_classified_dir_path)

    with TestRun.step("Read files with reclassification check."):
        read_files_with_reclassification_check(cache,
                                               target_ioclass_id=0,
                                               source_ioclass_id=ioclass_id_2,
                                               directory=dir_1,
                                               with_delay=True)
Example No. 10
def test_ioclass_directory_file_operations(filesystem):
    """
        title: Test IO classification by file operations.
        description: |
          Test if directory classification works properly after file operations like move or rename.
        pass_criteria:
          - No kernel bug.
          - The operations themselves should not cause reclassification but IO after those
            operations should be reclassified to the proper IO class.
    """

    test_dir_path = f"{mountpoint}/test_dir"
    nested_dir_path = f"{test_dir_path}/nested_dir"
    dd_blocks = random.randint(5, 50)

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
        # directory IO class
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{test_dir_path}",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem "
                      f"and mounting {core.system_path} at {mountpoint}."):
        core.create_filesystem(fs_type=filesystem)
        core.mount(mount_point=mountpoint)
        sync()

    with TestRun.step(f"Create directory {nested_dir_path}."):
        Directory.create_directory(path=nested_dir_path, parents=True)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Create test file."):
        classified_before = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        file_path = f"{test_dir_path}/test_file"
        (Dd().input("/dev/urandom").output(file_path).oflag("sync").block_size(
            Size(1, Unit.MebiByte)).count(dd_blocks).run())
        sync()
        drop_caches(DropCachesMode.ALL)
        test_file = File(file_path).refresh_item()

    with TestRun.step("Check classified occupancy."):
        classified_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        check_occupancy(classified_before + test_file.size, classified_after)

    with TestRun.step("Move test file out of classified directory."):
        classified_before = classified_after
        non_classified_before = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        test_file.move(destination=mountpoint)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Check classified occupancy."):
        classified_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        check_occupancy(classified_before, classified_after)
        TestRun.LOGGER.info("Checking non-classified occupancy")
        non_classified_after = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        check_occupancy(non_classified_before, non_classified_after)

    with TestRun.step("Read test file."):
        classified_before = classified_after
        non_classified_before = non_classified_after
        (Dd().input(test_file.full_path).output("/dev/null").block_size(
            Size(1, Unit.MebiByte)).run())

    with TestRun.step("Check classified occupancy."):
        classified_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        check_occupancy(classified_before - test_file.size, classified_after)
        TestRun.LOGGER.info("Checking non-classified occupancy")
        non_classified_after = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        check_occupancy(non_classified_before + test_file.size,
                        non_classified_after)

    with TestRun.step(f"Move test file to {nested_dir_path}."):
        classified_before = classified_after
        non_classified_before = non_classified_after
        test_file.move(destination=nested_dir_path)
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Check classified occupancy."):
        classified_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        check_occupancy(classified_before, classified_after)
        TestRun.LOGGER.info("Checking non-classified occupancy")
        non_classified_after = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        check_occupancy(non_classified_before, non_classified_after)

    with TestRun.step("Read test file."):
        classified_before = classified_after
        non_classified_before = non_classified_after
        (Dd().input(test_file.full_path).output("/dev/null").block_size(
            Size(1, Unit.MebiByte)).run())

    with TestRun.step("Check classified occupancy."):
        classified_after = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        check_occupancy(classified_before + test_file.size, classified_after)

    with TestRun.step("Check non-classified occupancy."):
        non_classified_after = cache.get_io_class_statistics(
            io_class_id=0).usage_stats.occupancy
        check_occupancy(non_classified_before - test_file.size,
                        non_classified_after)
Example No. 11
async def test_data_integrity_unplug(cache_mode):
    """
        title: Test if data integrity is maintained in a power failure scenario.
        description: |
          The test checks if the data written to the cache device is saved correctly in a power
          failure scenario, which is simulated by unplugging the cache device.
          FIO is interrupted when the cache device is unplugged. The test determines how many
          writes each FIO job was able to perform before the unplug and then checks if the data
          on the cache device matches FIO output up to the unplug (bearing in mind that the last
          write might have been interrupted).
        pass_criteria:
          - No system crash.
          - Data on the cache device are consistent with the data sent from FIO.
    """
    global fio_seed, tmp_dir, ram_disk
    cache_dev = TestRun.disks["cache"]
    core_dev = TestRun.disks["core"]

    sleep_max_s = timedelta(seconds=10)

    with TestRun.step("Test prepare"):
        random.seed(TestRun.random_seed)
        fio_seed = random.randint(0, 2**32)
        TestRun.LOGGER.info(f"FIO seed: {fio_seed}")
        tmp_dir = Directory.create_temp_directory()
        TestRun.LOGGER.info(f"Temporary directory: {tmp_dir.full_path}")
        ram_disk = RamDisk.create(Size(1, Unit.GiB), 1)[0]

        # csums[j][i] is csum for i-th io of j-th job
        csums = [{} for _ in range(num_jobs)]

    with TestRun.step("Test iterations:"):
        for cache_line_size in TestRun.iteration(CacheLineSize):
            with TestRun.step("Prefill the core device."):
                write_device(core_dev.path)
                data_prefill_cs = read_device_md5s(core_dev.path)

            # csums_rev is a reverse mapping to identify job, sector and seqno of I/O
            # with given csum
            csums_rev = {}
            for j in range(num_jobs):
                for b in range(job_workset_blocks):
                    cs = data_prefill_cs[j][b]
                    csums_rev[cs] = get_data_name(j, b, -1)

            with TestRun.step(
                    "Start a cache, add a core and set cache cleaning policy to NOP"
            ):
                cache = casadm.start_cache(cache_dev,
                                           cache_mode,
                                           cache_line_size,
                                           force=True)
                exported_object = cache.add_core(core_dev)
                cache.set_cleaning_policy(CleaningPolicy.nop)

            with TestRun.step("Start FIO to the exported object"):
                fio = prepare_base_fio() \
                    .target(exported_object.path) \
                    .run_time(100 * sleep_max_s)
                for i in range(num_jobs):
                    fio.add_job(f"di_{i}") \
                       .offset(job_workset_size * i) \
                       .io_size(Size(100, Unit.GiB))

                fio_task = start_async_func(fio.fio.run)

            with TestRun.step("Hot unplug the cache device after random time"):
                wait_time_s = random.randint(5,
                                             int(sleep_max_s.total_seconds()))
                sleep(wait_time_s)
                cache_dev.unplug()

            with TestRun.step("Analyze FIO execution after hot unplug"):
                fio_output = await fio_task
                if fio_output.exit_code == 0:
                    TestRun.LOGGER.warning(
                        "Unexpectedly successful fio - check if the device was unplugged correctly."
                    )
                results = fio.get_results(
                    TestRun.executor.run(f"cat {fio.fio.fio_file}").stdout)
                ios = [r.job.write.total_ios for r in results]

            with TestRun.step("Stop cache without flushing data"):
                try:
                    cache.stop(no_data_flush=True)
                except CmdException as e:
                    if not cli_messages.check_stderr_msg(
                            e.output, cli_messages.stop_cache_errors):
                        raise

            with TestRun.step("Plug back the cache device"):
                cache_dev.plug()

            with TestRun.step("Load cache"):
                cache = casadm.load_cache(cache_dev)

            with TestRun.step("Check data"):
                csums_actual = read_device_md5s(exported_object.path)

                # The last I/O in each job is interrupted by the unplug. It could have made it
                # to the medium or not. So the last I/O we expect to actually hit the disk
                # is 'num_io-2' or 'num_io-1' for each job. Below 'n1_' refers to 'num_io-1'
                # and 'n2_' refers to 'num_io-2'

                # seqno[j] is the last I/O seqno for given job (entire workset)
                n2_seqno = [io - 2 for io in ios]
                n1_seqno = [io - 1 for io in ios]

                # pattern[j][b] is the last I/O seqno for job j block b
                n2_pattern = get_pattern(n2_seqno)
                n1_pattern = get_pattern(n1_seqno)

                # Make sure we know data checksums for I/O that we expect to have
                # been committed assuming either n2_seqno or n1_seqno is the last
                # I/O committed by each job.
                gen_csums(ram_disk.path, n1_seqno, n1_pattern, csums,
                          csums_rev)
                gen_csums(ram_disk.path, n2_seqno, n2_pattern, csums,
                          csums_rev)

                fail = False
                for j in range(num_jobs):
                    for b in range(job_workset_blocks):
                        # possible checksums assuming n2_pattern or n1_pattern
                        cs_n2 = get_data_csum(j, b, n2_pattern,
                                              data_prefill_cs, csums)
                        cs_n1 = get_data_csum(j, b, n1_pattern,
                                              data_prefill_cs, csums)

                        # actual checksum read from CAS
                        cs_actual = csums_actual[j][b]

                        if cs_actual != cs_n2 and cs_actual != cs_n1:
                            fail = True

                            # attempt to identify erroneous data by comparing its checksum
                            # against the known checksums
                            identity = csums_rev[cs_actual] if cs_actual in csums_rev else \
                                f"UNKNOWN ({cs_actual[:8]})"

                            TestRun.LOGGER.error(
                                f"MISMATCH job {j} block {b} contains {identity} "
                                f"expected {get_data_name(j, b, n2_pattern[j][b])} "
                                f"or {get_data_name(j, b, n1_pattern[j][b]) }")

                if fail:
                    break

                cache.stop(no_data_flush=True)
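read_device_md5s, gen_csums and get_data_csum are helpers defined elsewhere in this test module. Their core idea, one checksum per block of a device region, can be sketched with hashlib alone (the block size and geometry are assumptions):

import hashlib

BLOCK_SIZE = 4096  # assumed I/O block size

def region_md5s(dev_path: str, block_count: int, offset: int = 0) -> list:
    """Return one md5 hex digest per block of a device region."""
    csums = []
    with open(dev_path, "rb") as dev:
        dev.seek(offset)
        for _ in range(block_count):
            csums.append(hashlib.md5(dev.read(BLOCK_SIZE)).hexdigest())
    return csums

# A block then passes verification if it matches either candidate state, since
# the write interrupted by the unplug may or may not have reached the medium:
#     cs_actual in (cs_n1, cs_n2)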